# The usual necessary and nice-to-have imports
#matplotlib inline
import matplotlib.pyplot as plt  # plotting
import seaborn as sns; sns.set() # styling (uncomment if you want)
import numpy as np               # math

# download speech and noise example files
s_file_name = 'speech_8kHz_murder.wav'
#!curl https://staffwww.dcs.shef.ac.uk/people/S.Goetze/sound/{s_file_name} -o {s_file_name} 

import soundfile as sf              # WAV file reading
from IPython import display as ipd  # in-notebook audio playback

# load speech waveform (sample array) and its sampling rate in Hz
s, fs = sf.read(s_file_name)

print('File "' + s_file_name + '" loaded. Its sampling rate is ' + str(fs) + ' Hz.')

# listen to the sound file (if you want)
# NOTE(review): an Audio widget only renders when it is the last expression of a notebook cell
ipd.Audio(s, rate=fs)

# Cut a piece out of the signal and visualise it.
start_sample = int(10 * fs)                   # start at 10 s into the recording
no_of_samples = 4096                          # number of samples to cut out
end_sample = start_sample + no_of_samples     # one past the last sample (slice end)
# BUGFIX: np.linspace(start_sample, end_sample, no_of_samples) yields a
# non-integer step (4096/4095 ~ 1.00024) and includes end_sample, so the
# x-axis did not match the true sample indices of x1. np.arange gives the
# exact indices start_sample .. end_sample-1.
sample_vec = np.arange(start_sample, end_sample)
x1 = s[start_sample:end_sample]               # the excerpt itself

plt.figure(figsize=(8, 5))
plt.subplot(2, 1, 1)
plt.plot(np.arange(0, len(s)), s)             # full signal ...
plt.ylabel('$x_1[k]$')
plt.plot(sample_vec, x1, 'r')                 # ... with the excerpt overlaid in red
plt.subplot(2, 1, 2)
plt.plot(sample_vec, x1, 'r')                 # excerpt alone on its own axis
plt.xlabel('$k$')
plt.ylabel('$x_1$[' + str(start_sample) + '...' + str(end_sample) + ']')
plt.title('$x_1[k]$ for ' + str(len(x1)) + ' samples between ' 
          + str(start_sample) + ' to ' + str(end_sample) + 
          ' ($f_s$=' + str(fs) +')')
plt.tight_layout() # this allows for some space for the title text
#plt.show()




# Split the excerpt into frames (non-overlapping here, since Lov = 1)
# and visualise the first five of them.
Lw   = 512                     # frame length in samples
Lov  = 1                       # frame overlap factor
Lhop = int(np.round(Lw / Lov)) # hop between consecutive frame starts

# layout: one wide axis on top, five small axes in the bottom row
plt.figure(figsize=(12, 6))
ax1 = plt.subplot2grid(shape=(2, 5), loc=(0, 0), colspan=5)
ax_blocks = [plt.subplot2grid(shape=(2, 5), loc=(1, col), colspan=1)
             for col in range(5)]
ax2, ax3, ax4, ax5, ax6 = ax_blocks

# upper panel: the whole excerpt
ax1.plot(sample_vec, x1, 'r')
ax1.set_xlabel('$k$')
ax1.set_ylabel('$x_1$[' + str(start_sample) + '...' + str(end_sample) + ']');
ax1.set_title('A piece between sample ' + str(start_sample) + 
          ' and ' + str(end_sample) + ' (of length ' + str(len(x1)) + 
          ') from the 1st channel ($f_s$=' + str(fs) +')')

# lower panels: each frame in its own colour, also overlaid on the overview
clrs = ['g','y','m','b','c','k']  # one colour per frame
for idx in range(5):
    frame_start = start_sample + idx * Lhop       # absolute index of frame begin
    k_axis = np.arange(frame_start, frame_start + Lw)
    frame  = x1[idx * Lhop:idx * Lhop + Lw]

    ax1.plot(k_axis, frame, clrs[idx])            # highlight frame in overview

    panel = ax_blocks[idx]
    panel.plot(k_axis, frame, clrs[idx])
    panel.set_xlabel('$k$')
    panel.set_ylim(-0.35, 0.35)
    panel.set_title('Block ' + str(idx))

# automatically adjust padding horizontally as well as vertically
plt.tight_layout()
#plt.show()




# Load two short example recordings (an 'e' sound and a 'z' sound) that are
# contrasted below using short-term energy and zero-crossing rate.
file_name = 'voiced_unvoiced_e.wav'
#file_name = 'word_fish.wav'         # another one to play around with (if you like)

# download speech and noise example files
#!curl https://staffwww.dcs.shef.ac.uk/people/S.Goetze/sound/{file_name} -o {file_name} 
# load speech wave into variable
e, fs_e = sf.read(file_name)

print('File "' + file_name + '" loaded. It has a sampling rate of f_s = ' + str(fs_e) + ' Hz.')

file_name = 'voiced_unvoiced_z.wav'
#file_name = 'word_speech.wav'     # another one to play around with (if you like)

# download speech and noise example files
#!curl https://staffwww.dcs.shef.ac.uk/people/S.Goetze/sound/{file_name} -o {file_name} 
# load speech wave into variable
z, fs_z = sf.read(file_name)

print('File "' + file_name + '" loaded. It has a sampling rate of f_s = ' + str(fs_z) + ' Hz.')

# listen to the sound files (if you want)
# NOTE(review): in a notebook only the last expression of a cell is rendered,
# so the first Audio widget below is created but never displayed.
ipd.Audio(e, rate=fs_e)
ipd.Audio(z, rate=fs_z)




def calc_STE(signal, sampsPerFrame):
    """Compute the short-term energy of non-overlapping signal frames.

    Parameters
    ----------
    signal : array_like
        1-D sequence of samples.
    sampsPerFrame : int
        Frame length in samples. Trailing samples that do not fill a
        complete frame are discarded (same as the loop-based original).

    Returns
    -------
    numpy.ndarray
        Length ``len(signal) // sampsPerFrame`` array where entry ``m``
        is ``sum(signal[m*L : (m+1)*L] ** 2)``.
    """
    signal = np.asarray(signal)
    nFrames = len(signal) // sampsPerFrame   # number of complete frames
    # Vectorised: view the usable prefix as an (nFrames, sampsPerFrame)
    # matrix and sum the squared samples along each row, replacing the
    # Python-level per-frame loop. Cast to float so the result dtype
    # matches the original's np.zeros() accumulator.
    frames = signal[:nFrames * sampsPerFrame].reshape(nFrames, sampsPerFrame)
    return np.sum(frames.astype(float) ** 2, axis=1)

def calc_ZCR(signal, sampsPerFrame):
    """Count zero crossings per non-overlapping signal frame.

    Parameters
    ----------
    signal : array_like
        1-D sequence of samples.
    sampsPerFrame : int
        Frame length in samples. Trailing samples that do not fill a
        complete frame are discarded (same as the loop-based original).

    Returns
    -------
    numpy.ndarray
        Length ``len(signal) // sampsPerFrame`` float array; entry ``m``
        is ``sum(0.5 * |sign(x[k]) - sign(x[k-1])|)`` over frame ``m``
        (crossings are only counted within a frame, not across frame
        boundaries, exactly as in the original double loop).
    """
    signal = np.asarray(signal)
    nFrames = len(signal) // sampsPerFrame   # number of complete frames
    frames = signal[:nFrames * sampsPerFrame].reshape(nFrames, sampsPerFrame)
    # |sign(x[k]) - sign(x[k-1])| is 2 at a sign change (1 when one of the
    # two samples is exactly 0), so half the per-frame sum reproduces the
    # original per-sample accumulation — vectorised instead of two nested
    # Python loops.
    sign_changes = np.abs(np.diff(np.sign(frames), axis=1))
    return 0.5 * np.sum(sign_changes, axis=1)
    
# Plot waveform and per-frame zero-crossing rate for the 'e' recording.
signal = e
sampsPerFrame = int(0.02 * fs_e)  # 20 ms frames

# creating grid for subplots
plt.figure(figsize=(12, 6))
plt.subplot(2, 1, 1)
plt.plot(signal)                                 # waveform
plt.subplot(2, 1, 2)
#plt.plot(calc_STE(signal, sampsPerFrame))       # swap in for short-term energy
plt.plot(calc_ZCR(signal, sampsPerFrame))

# BUGFIX: the lower panel shows the zero-crossing rate (the STE call above is
# commented out), so the title now says so instead of "(Short-term) Energy".
# Also a raw string, so '\m' in \mathrm is not parsed as an invalid escape.
plt.title(r'Zero-crossing rate per block ($L_{\mathrm{Bl}}=' + 
          str(sampsPerFrame) + '$, which is ' + 
          str(sampsPerFrame/fs_e*1000) + 'ms @ $f_s=' + str(fs_e) +'$)')
#plt.text(18,0.3, 'Short Term Energy is higher for voiced speech parts', style='italic',
#        bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
plt.tight_layout() # automatically adjust padding to make space for titles

ipd.Audio(e, rate=fs_e) # add possibility here to listen to the sound once again

# Plot waveform and per-frame zero-crossing rate for the 'z' recording.
signal = z
sampsPerFrame = int(0.02 * fs_z)  # 20 ms frames

# plot
plt.figure(figsize=(12, 6))
plt.subplot(2, 1, 1)
plt.plot(signal)                                 # waveform
plt.subplot(2, 1, 2)
#plt.plot(calc_STE(signal, sampsPerFrame))       # swap in for short-term energy
plt.plot(calc_ZCR(signal, sampsPerFrame))

# BUGFIX: the lower panel shows the zero-crossing rate (the STE call above is
# commented out), so the title now says so instead of "(Short-term) Energy".
# Also a raw string, so '\m' in \mathrm is not parsed as an invalid escape.
plt.title(r'Zero-crossing rate per block ($L_{\mathrm{Bl}}=' + 
          str(sampsPerFrame) + '$, which is ' + 
          str(sampsPerFrame/fs_z*1000) + 'ms @ $f_s=' + str(fs_z) +'$)')
#plt.text(13,0.09, 'Short Term Energy is higher for voiced speech parts', style='italic',
#        bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
plt.tight_layout() # automatically adjust padding to make space for titles

ipd.Audio(z, rate=fs_z) # add possibility here to listen to the sound once again
plt.show()