#!/usr/bin/env python
# coding: utf-8


#
# # Dispersion Entropy
# **Background**
# Unlike the usual distribution-based entropy, Dispersion Entropy takes temporal dependency into account, as do Sample Entropy and Approximate Entropy. It is an embedding-based entropy function. The idea of Dispersion Entropy is almost the same as that of Sample and Approximate Entropy: extract embeddings, estimate their distribution, and compute the entropy. However, a few fine details make Dispersion Entropy more useful (a minimal sketch of these steps follows the list).
# 1. First, the distribution of the original signal is mapped to a uniform distribution (using the CDF) and then divided into n classes. This is the same as the quantization process for a normally distributed signal, such as speech. In quantization, this mapping minimizes the quantization error by assigning small quantization steps to samples with high density and large steps to samples with low density. Think of it this way: if a large number of samples in a signal lies in a range near zero, say (-0.1, 0.1), then almost every embedding will contain at least one value from that range. CDF mapping avoids that. This Python implementation also includes other mapping functions commonly used in speech processing, namely A-law and µ-law, with parameters A and µ to control the mapping.
# 2. Second, embeddings can be extracted with a delay factor; for example, with a delay of 2, an embedding is built from consecutive samples while skipping every other one, which is a kind of decimation. This helps if the signal is sampled at a very high sampling frequency, i.e. it is very smooth locally. In a signal with a very high sampling rate, many consecutive samples have similar values, which leads to a very large number of constant embeddings.
# 3. Third, not so much a third point but an alternative way to deal with a signal with a very high sampling rate, is the scale factor, which is nothing but a decimator.
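# A minimal sketch of the steps described above (not spkit's internal implementation):
# map the signal to (0, 1) with a Gaussian CDF, quantize into `classes` levels, extract
# embeddings of length `emb_dim` with a given `delay`, and take the Shannon entropy of
# the pattern distribution. Function and variable names here are illustrative assumptions,
# and the log base and normalization may differ from what sp.dispersion_entropy returns.

# In[ ]:


import numpy as np
from scipy.stats import norm

def dispersion_entropy_sketch(x, classes=10, emb_dim=2, delay=1, scale=1):
    x = np.asarray(x, dtype=float)[::scale]   # point 3: scale factor treated as plain decimation, as described above
    # point 1: CDF mapping to (0, 1), assuming roughly Gaussian samples, then n-class quantization
    y = norm.cdf(x, loc=x.mean(), scale=x.std())
    z = np.clip(np.round(classes*y + 0.5), 1, classes).astype(int)
    # point 2: embeddings of length emb_dim, taking every `delay`-th sample
    N = len(z) - (emb_dim - 1)*delay
    patterns = np.array([z[i : i + (emb_dim - 1)*delay + 1 : delay] for i in range(N)])
    # estimate the distribution of the patterns and compute its Shannon entropy
    _, counts = np.unique(patterns, axis=0, return_counts=True)
    p = counts/counts.sum()
    return -np.sum(p*np.log2(p))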
# In[1]:


import numpy as np
import matplotlib.pyplot as plt
import sys, scipy
from scipy import linalg as LA
import spkit as sp


# # EEG Sample Signal

# In[3]:


X, ch_names = sp.load_data.eegSample()
fs = 128
X.shape


# In[4]:


Xf = sp.filter_X(X, band=[1,20], btype='bandpass', verbose=0)
Xf.shape


# In[5]:


t = np.arange(X.shape[0])/fs
plt.figure(figsize=(15,5))
plt.plot(t, Xf + np.arange(14)*200)   # offset each of the 14 channels for display
plt.xlim([0, t[-1]])
plt.show()


# # Dispersion Entropy

# In[6]:


sp.dispersion_entropy


# In[7]:


Xi = Xf[:,0].copy()  # only one channel


# ## embedding dimension = 2

# In[11]:


de, prob, patterns_dict, _, _ = sp.dispersion_entropy(Xi, classes=10, scale=1, emb_dim=2, delay=1, return_all=True)
de


# ### Probability of all the patterns found

# In[14]:


plt.stem(prob)
plt.xlabel('pattern #')
plt.ylabel('probability')
plt.show()


# ### Pattern dictionary

# In[13]:


patterns_dict


# ### top 10 patterns

# In[22]:


# each row of PP is a pattern followed by its count; sort by count, descending
PP = np.array([list(k) + [patterns_dict[k]] for k in patterns_dict])
idx = np.argsort(PP[:,-1])[::-1]
PP[idx[:10],:-1]


# ## embedding dimension = 4

# In[23]:


de, prob, patterns_dict, _, _ = sp.dispersion_entropy(Xi, classes=20, scale=1, emb_dim=4, delay=1, return_all=True)
de


# In[24]:


PP = np.array([list(k) + [patterns_dict[k]] for k in patterns_dict])
idx = np.argsort(PP[:,-1])[::-1]
PP[idx[:10],:-1]


# ### top-10 non-constant patterns

# In[27]:


Ptop = np.array(list(PP[idx,:-1]))
idx2 = np.where(np.sum(np.abs(Ptop - Ptop.mean(1)[:,None]), 1) > 0)[0]  # keep only non-constant patterns
plt.plot(Ptop[idx2[:10]].T, '--o')
plt.xticks([0,1,2,3])
plt.grid()
plt.show()


# In[28]:


plt.figure(figsize=(15,5))
for i in range(10):
    plt.subplot(2,5,i+1)
    plt.plot(Ptop[idx2[i]])
    plt.grid()
    #plt.yticks([])


# # Dispersion Entropy with sliding window

# In[29]:


de_temporal = []
win = np.arange(128)   # 1-second window at fs = 128
while win[-1]