options(repr.plot.width=3.5, repr.plot.height=3.5)
options(warn=-1) # turns off warnings, to turn on: "options(warn=0)"

# Importing the libraries
library(Matrix)
library(stringr)
library(ape)

# Source the toolbox functions (compute_huffcode, etc.).
for (f in list.files(path="nt_toolbox/toolbox_general/", pattern="*.R")) {
    source(paste("nt_toolbox/toolbox_general/", f, sep=""))
}
for (f in list.files(path="nt_toolbox/toolbox_signal/", pattern="*.R")) {
    source(paste("nt_toolbox/toolbox_signal/", f, sep=""))
}

# Draw a binary signal of length n with P(x = 1) = p and P(x = 2) = 1 - p.
p = 0.1
n = 512
x = ceiling(runif(n, 0, 1) > p) + 1

# Empirical histogram of the two symbols.
h = c(sum(x==1), sum(x==2))
h = h/sum(h)
print(paste("Empirical p =", h[1]))

# Empirical entropy of the source (in bits per symbol).
e = -sum(h * log2(c(max(h[1],1e-20), max(h[2],1e-20))))
print(paste("Entropy = ", e))

# Probability distribution of the 5 symbols used for Huffman coding.
h = c(.1, .15, .4, .15, .2)
m = length(h)

# We use the symbols i = 1,2,3,4,5 (as strings) with the associated probabilities h(i).
T = list(list())
for (i in (1:m)) {
    T[[i]] = list(toString(i), h[i])
}

# Build the Huffman tree: repeatedly merge the two nodes of smallest probability.
while (length(T) >= 2) {
    T = T[order(sapply(T, '[[', 2))]
    q = as.numeric(T[[1]][2]) + as.numeric(T[[2]][2])
    t = T[1:2]
    T = T[-(1:2)]
    T[[length(T)+1]] = list(t, q)
}

# Strip the probabilities and keep only the nested structure of symbols.
trim = function(T) {
    T0 = T[[1]][1]
    if (typeof(T0[[1]][1]) == 'character') {
        return (T0)
    } else {
        return (list(trim(T0[[1]][1]), trim(T0[[1]][2])))
    }
}

K = list()
K[[1]] = list(trim(T)[[1]][[1]][1], trim(T)[[2]])
T = K

# Flatten the list into a Newick string so it can be plotted with ape.
x2 = paste0(lapply(T, function(y) paste0("(", paste0(y, collapse = ","), ")")), collapse = ",")
# Remove unwanted characters.
x2 = gsub('\"|c|list| ', "", x2)
x2 = paste0("(", x2, ");")
# Remove brackets from single-term list objects.
x3 = str_replace_all(x2, "\\([a-z]*\\)", function(x) gsub("^\\(|\\)$", "", x))
# Plot the Huffman tree.
plot(read.tree(text = x3), color="blue")

# Code words associated with each symbol.
codes = list()
c = compute_huffcode(h)
for (i in (1:length(h))) {
    codes[[toString(i)]] = c[i]
}
for (e in (1:length(codes))) {
    print(paste("Code of token", e, "=", codes[[toString(e)]]))
}

# Draw a vector x of n i.i.d. symbols distributed according to h.
n = 1024

rand_discr = function(p, m) {
    # rand_discr - discrete random generator
    #   y = rand_discr(p, m)
    # y is a random vector of length m drawn from
    # a variable X such that p(i) = Prob(X = i).
    p = p/sum(p)
    n = length(p)
    coin = runif(m, 0, 1)
    cumprob = c(0, cumsum(p))
    sample = matrix(0, nrow=1, ncol=m)
    for (j in (1:n)) {
        ind = c((coin > cumprob[j]) & (coin <= cumprob[j+1]))
        sample[ind] = j
    }
    return (sample)
}

x = rand_discr(h, n)

# Exercise 1: code the vector x into a binary string y
# (a hedged sketch is given at the end of this script).
source("nt_solutions/coding_2_entropic/exo1.R")
## Insert your code here.

# Compare the number of bits of the code with the entropy bound.
e = -sum(h * log2(c(max(h[1],1e-20), max(h[2],1e-20), max(h[3],1e-20), max(h[4],1e-20), max(h[5],1e-20))))
print(paste("Entropy bound = ", n*e))
print(paste("Huffman code = ", nchar(y)))

# Decode the bit string y by traversing the Huffman tree bit by bit.
x1 = c()
T0 = K
for (e in strsplit(y, split='')[[1]]) {
    if (e == '0') {
        T0 = T0[[1]]
    } else {
        T0 = T0[[1]][[2]]
    }
    if (typeof(T0) == 'character') {
        i = i + 1
        x1 = c(x1, T0)
        T0 = T
    }
}
err = norm(x - as.double(x1))
print(paste("Error (should be zero) : ", err))

# Block coding: a binary source with P(x = 1) = t and P(x = 2) = 1 - t.
t = .12
h = c(t, 1-t)
n = 4096 * 2
x = (runif(n, 0, 1) > t) + 1

# Group the symbols into blocks of length q (m is the alphabet size).
q = 3
m = 2

# Pad the signal so that its length is a multiple of q.
n1 = (floor(n/q)+1)*q
x1 = matrix(0, nrow=1, ncol=n1)
x1[1:length(x)] = x
x1[(length(x)+1):length(x1)] = 1
x1 = x1 - 1

# Map each block of q binary symbols to a single symbol in {0, ..., m^q - 1}.
x2 = c()
for (i in seq(1, n1, by=q)) {
    mult = m^(0:(q-1))
    x2 = c(x2, sum(x1[i:(i+q-1)] * mult))
}

# Probability distribution of the blocks (product distribution).
H = h
for (i in (1:(q-1))) {
    Hold = H
    H = c()
    for (j in (1:length(h))) {
        H = c(H, h[j]*Hold)
    }
}

# Equivalent, more compact construction using the Kronecker product.
H = h
for (i in (1:(q-1))) {
    H = kronecker(H, h)
}
H

# Exercise 2: coding of the grouped symbols
# (a hedged sketch is given at the end of this script).
source("nt_solutions/coding_2_entropic/exo2.R")
## Insert your code here.
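
# ------------------------------------------------------------------
# Sketch for Exercise 1 (Huffman coding of a symbol vector).
# The reference solution lives in nt_solutions/coding_2_entropic/exo1.R and is
# not reproduced here. The helper below uses a hypothetical name (it is not part
# of the toolbox) and assumes compute_huffcode returns one code word per symbol
# as a character string, as suggested by the `codes` printout above: each sample
# is replaced by its code word and the words are concatenated into a bit string.
huffman_encode_sketch = function(x, codes) {
    paste(sapply(as.vector(x), function(s) codes[[toString(s)]]), collapse = "")
}
# Usage (with the 5-symbol source of the first part of the tour):
#   y = huffman_encode_sketch(x, codes)
#   print(nchar(y))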
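
# ------------------------------------------------------------------
# Sketch for Exercise 2 (entropy of the grouped symbols).
# The reference solution lives in nt_solutions/coding_2_entropic/exo2.R and is
# not reproduced here. A minimal check, using only the h, H and q defined above:
# since the blocks are made of independent symbols, the entropy of the block
# distribution H equals q times the entropy of h, i.e. the entropy per original
# symbol is unchanged by the grouping.
entropy_sketch = function(p) { -sum(p * log2(pmax(p, 1e-20))) }
print(paste("Entropy of h     =", entropy_sketch(h)))
print(paste("Entropy of H / q =", entropy_sketch(H) / q))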