import numpy as np
import wave
import pylab as pl
import torch

# Print the torch version up front so runs are easy to reproduce/debug.
print(torch.__version__)

#import wav file
filename = "Alarm01.wav"
# Use a context manager so the file handle is always closed.  The original
# ended with `f.close` (no parentheses), which only references the method and
# never actually closes the file.
with wave.open(filename, "rb") as f:
    #read the format information
    num_frames = f.getnframes()      # total samples per channel
    num_channels = f.getnchannels()  # 1 = mono, 2 = stereo
    sample_rate = f.getframerate()   # samples per second (Hz)

    #get waveform data (raw interleaved PCM bytes)
    str_data = f.readframes(num_frames)

# process input wave data
# initial time sequence
# np.frombuffer returns a READ-ONLY view of the bytes; copy so the array is
# writable — torch.from_numpy() on a non-writable array emits a UserWarning
# and any in-place tensor op would be undefined behaviour.
wave_data = np.frombuffer(str_data, dtype=np.short).copy()
wave_data.shape = -1, num_channels   # one row per sample, one column per channel
wave_data = wave_data.T[0]           # keep only the first channel
wave_data_torch = torch.from_numpy(wave_data)  # convert np array to torch tensor (shares memory)

# timestamp of each sample in seconds: sample index / sample rate
# (np.arange already returns an ndarray; the original's extra np.array() copy is dropped)
time = np.arange(0, num_frames) * (1.0 / sample_rate)
time_torch = torch.from_numpy(time)

#pre-emphasis module
# High-pass pre-emphasis filter: y[0] = x[0]; y[t] = x[t] - a * x[t-1].
# Computed entirely in torch instead of the original's torch->numpy->torch
# round-trips (np.array on tensor slices, np.append, torch.from_numpy).
# The result is a float64 tensor, exactly as before (int16 promoted by the
# float coefficient).
signal = wave_data_torch
emp_factor = 0.95  # typical pre-emphasis coefficient (0.95–0.97)
sig = signal.to(torch.float64)
emphasized_signal = torch.cat((sig[:1], sig[1:] - emp_factor * sig[:-1]))

#framing module
# total_frame = 122868 sample_rate = 22050
# choose frame_size = 0.025 = 25ms and frame_stride = 0.01 = 10ms 
frame_size = 0.025    # frame duration in seconds (25 ms)
frame_stride = 0.01   # hop between consecutive frame starts in seconds (10 ms)
frame_length = round(frame_size * sample_rate)   # samples per frame
frame_step = round(frame_stride * sample_rate)   # samples between frame starts
signal_length = num_frames                       # total samples in the signal
# NOTE(review): total_interval (ceil) is computed but never used by the loops
# below — they iterate over max_index (round) instead.  Because
# round(x) <= floor(x) + 1, every slice [n*frame_step : n*frame_step+frame_length]
# for n < max_index still fits inside the signal, but the trailing samples a
# ceil-plus-padding scheme would cover are silently dropped — confirm intended.
total_interval = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step)) #total intervals of frame obtained 
#generate maximum frame index
max_index = round((signal_length - frame_length)/ frame_step ) 

#slice the signal & time into frames
# Convert the emphasized signal to numpy ONCE; the original called
# np.array(tensor_slice) on every loop iteration, paying a conversion + copy
# per frame.  Values and dtypes are unchanged.
emphasized_np = emphasized_signal.numpy()
signal_frame = np.zeros((max_index, frame_length))  # rows: frames, cols: samples within a frame
for n in range(0, max_index):
    start = n * frame_step
    signal_frame[n] = emphasized_np[start:start + frame_length]
signal_frame = torch.from_numpy(signal_frame)

# matching per-frame timestamps (time is already an ndarray; the original's
# per-iteration np.array() copy is unnecessary)
time_frame = np.zeros((max_index, frame_length))
for n in range(0, max_index):
    start = n * frame_step
    time_frame[n] = time[start:start + frame_length]
time_frame = torch.from_numpy(time_frame)

#windowing module
# Apply a Hamming window to every frame to taper the edges before any FFT.
# The window is loop-invariant, so it is built once (the original rebuilt
# np.hamming(frame_length) on every iteration) and multiplied via broadcasting
# over all frames at once — same float64 values as the original row-by-row loop.
hamming_window = np.hamming(frame_length)
signal_frame_windowing = signal_frame.numpy() * hamming_window
signal_frame_windowing = torch.from_numpy(signal_frame_windowing)

# Sanity check: every stage of the pipeline should now be a torch tensor.
# Fixed: the "signal_frame" line previously tested signal_frame_windowing
# (duplicating the last line) instead of signal_frame itself.
print("Check tensors:"
    "\ntime:tensor:", torch.is_tensor(time_torch),
    "\nwave_data: ", torch.is_tensor(wave_data_torch),
    "\nsignal:", torch.is_tensor(signal),
    "\nemphasized_signal: ", torch.is_tensor(emphasized_signal),
    "\nsignal_frame:", torch.is_tensor(signal_frame),
    "\ntime_frame",torch.is_tensor(time_frame),
    "\nsignal_frame_windowing",torch.is_tensor(signal_frame_windowing),
)

# 2x2 grid: raw input, pre-emphasized signal, one framed sample, one windowed sample
pl.subplot(221)
pl.title(" input waveform")
pl.plot(time_torch,wave_data_torch)

pl.subplot(222)
pl.title("Pre Emphasized Waveform")  # fixed typo: "Emphasiszed"
pl.plot(time, emphasized_signal)

pl.subplot(223)
pl.title("framing sample")
# frame 10 chosen arbitrarily as a representative frame
pl.plot(time_frame[10], signal_frame[10])

pl.subplot(224)
pl.title("windowing sample")
pl.plot(time_frame[10], signal_frame_windowing[10])
pl.show()