import numpy
import pyaudio as pa

import time
import sys
import math
import numpy as np
from matplotlib import pyplot as plt
import scipy.signal as signal
import torch
import struct
from nn.cnn import ConvNeuralNet
sys.path.append('audio_feature')
import extract_feature

from spectrogram import (
fs,FFTLength,frameSamples,overlapSamples,numBands
)

import spectrogram as spec

import filter_bank
import window

from nn.datastore import load_classes
# Recording configuration.
CHANNELS = 1
RATE = fs  # sample rate is defined by the spectrogram module

# Overrides the numBands imported from spectrogram above — must match the
# filter-bank size the model was trained with.
numBands = 50

# NOTE(review): fulldata/dry_data/samples are never used in this file —
# candidates for removal once confirmed unused elsewhere.
fulldata = None
dry_data = None

# Model input geometry: spectrogram hop count and number of output classes.
numHops = 98
numClasses = 31
samples = 0
def _get_sample(cp,  i):

    return

def to_sample(data):
    """Decode a raw 16-bit PCM byte buffer into a list of sample values.

    Parameters
    ----------
    data : bytes
        Raw audio buffer of native-endian signed 16-bit samples; a trailing
        odd byte, if any, is ignored (matches the original behavior).

    Returns
    -------
    list[int]
        Signed sample values in [-32768, 32767].
    """
    count = len(data) // 2
    # One C-level unpack call instead of a Python-level loop of
    # struct.unpack_from calls per sample; '0h' on empty input yields ().
    return list(struct.unpack_from('%dh' % count, data))

def data_rms(cp):
    """Return the integer RMS (root-mean-square) amplitude of *cp*.

    Parameters
    ----------
    cp : sequence of int
        PCM sample values.

    Returns
    -------
    int
        Truncated RMS of the samples; 0 for an empty sequence (the
        original raised ZeroDivisionError on empty input).
    """
    sample_count = len(cp)
    # Guard against an empty chunk — silence detection should treat it as 0.
    if sample_count == 0:
        return 0

    sum_squares = sum(sample * sample for sample in cp)
    return int(math.sqrt(sum_squares / sample_count))

class speech():
    """Microphone capture helper built on PyAudio.

    Opens a 16 kHz mono int16 input stream. ``read()`` waits for the input
    volume (RMS) to cross a threshold, records until it drops back below
    that threshold, and returns the captured samples.
    """

    def __init__(self):
        # Soundtrack / capture properties.
        self.format = pa.paInt16
        self.rate = 16000
        self.channel = 1
        self.chunk = 400
        self.threshold = 150
        self.file = 'audio.wav'

        # Initialise the microphone stream.
        self.audio = pa.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channel,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.chunk)

    def read(self):
        """Block until speech is detected, then return the recorded samples.

        Returns a flat list of int16 sample values covering every chunk
        whose RMS stayed above ``self.threshold``.
        """
        # Wait for the first chunk loud enough to count as speech.
        while True:
            raw = self.stream.read(self.chunk)
            chunk_samples = to_sample(raw)
            if data_rms(chunk_samples) > self.threshold:
                break

        # Keep every loud chunk; stop as soon as the input falls silent.
        recorded = []
        while True:
            recorded.extend(chunk_samples)
            raw = self.stream.read(self.chunk)
            chunk_samples = to_sample(raw)
            if data_rms(chunk_samples) <= self.threshold:
                break

        return recorded

    def close(self):
        """Stop and release the input stream and the PyAudio instance."""
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()

def main():
    """Record one utterance, classify it with the CNN, play it back and plot it."""

    classes=load_classes(".")

    # Build the Bark-scale filter bank and install a configured feature
    # extractor as a module-level attribute of spectrogram — presumably
    # spec.gen_spectrogram reads spec.afe; TODO confirm against that module.
    bark_fbank, _, _ = filter_bank.gen_bark_filter_bank(FFTLength, fs, numBands)
    spec.afe = extract_feature.audioFeatureExtractor(
        SampleRate=fs,
        FFTLength=FFTLength,
        Window=window.hann(frameSamples, "periodic"),
        OverlapLength=overlapSamples,
        FilterBank=bark_fbank)

    # Load the trained speech-command CNN and switch it to inference mode.
    model = ConvNeuralNet(numHops, numClasses)
    model.load_state_dict(torch.load("speech-cmd-model.pth"))
    model.eval()

    # Capture one utterance from the microphone (blocks until speech starts).
    audio_reader=speech()
    print("start")
    stream=audio_reader.read()
    print("stopped")

    # Keep at most 16000 samples (one second at the 16 kHz capture rate).
    l=len(stream)
    if l> 16000:
        l=16000

    # Scale int16 PCM into [-1, 1) floats before feature extraction.
    numpydata = numpy.asarray(stream[0:l],dtype=numpy.int16)
    data=numpydata / 32768
    spectrogram = spec.gen_spectrogram(data)
    # Wrap as a (batch=1, channel=1, ...) float32 tensor for the CNN.
    inputs = torch.from_numpy(np.asarray([[spectrogram]], dtype=np.float32))
    y_pred = model(inputs)

    # Predicted class = index of the highest-scoring output.
    argmax = torch.argmax(y_pred, 1)
    i=argmax.item()

    # define grid of plots: waveform on top, spectrogram below,
    # titled with the predicted class label
    fig, axs = plt.subplots(nrows=2, ncols=1)

    fig.suptitle(classes[i])
    axs[0].plot(numpydata)

    # Axes scaled by 1/100 so each spectrogram cell spans 0.01 units —
    # NOTE(review): presumably mapping hops/bands to time/frequency; confirm.
    x = spectrogram.shape[0]
    y = spectrogram.shape[1]
    Y = numpy.arange(0, y / 100, 0.01)
    X = numpy.arange(0, x / 100, 0.01)
    c = axs[1].pcolormesh(X, Y, spectrogram.T, shading='nearest')

    # Play the captured audio back through the default output device.
    stream = audio_reader.audio.open(rate=int(fs), format=pa.paInt16, channels=1, output=True)

    stream.write(numpydata.tobytes())

    stream.close()  # this blocks until sound finishes playing

    # Show the plots (blocks until the window is closed).
    plt.show()

    audio_reader.close()


# Run interactively: classify one utterance per iteration until Ctrl-C.
# Guarded so importing this module no longer starts the capture loop.
if __name__ == "__main__":
    print("press CTRL C to exit.")
    while True:
        try:
            main()
        except KeyboardInterrupt:
            print("Bye")
            sys.exit()
