from model import Net

import numpy as np
import csv
from scipy.fftpack import fft, dct
from scipy.io import wavfile
import torch
from torch import nn
import torch.nn.functional as F
import sys
import os
import random
from matplotlib import pyplot as plt

folder_filter = ("zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "blank")

# The audio files are sampled at 16 kHz; every 256 samples form one frame,
# giving roughly 62 frames per second.
# Of the power spectrum produced by the FFT, only the voice band below ~2 kHz
# matters for speech recognition, so after dropping the DC component and the
# high-frequency part the number of input features is fixed at 63.
#
INPUT_FEATURE = 63

# Number of raw audio samples per analysis frame.
AUDIO_FRAME_LEN = 256

# Temporal span (in frames) covered by the TCN, built up layer by layer
# (presumably kernel size 3 with dilations 1, 2, 4, 8, 16 — see model.Net):
# Output: 1
# Layer 0: 1  + (2 * 1) = 3
# Layer 1: 3  + (2 * 2) = 7
# Layer 2: 7  + (2 * 4) = 15
# Layer 3: 15 + (2 * 8) = 31
# Layer 4: 31 + (2 * 16) = 63
TCN_SPAN = 63

# Total number of training epochs.
EPOCH = 20

# One output class per entry in folder_filter (ten digits plus "blank").
OUTPUT_FEATURE = len(folder_filter)

# Index of the "blank" (non-speech / noise) class.
BLANK_CLASS = OUTPUT_FEATURE - 1

def audio_to_windows_256(data, frame_len=256, num_features=63):
    """Convert a raw 1-D audio signal into per-frame spectral feature rows.

    The signal is truncated to a whole number of ``frame_len``-sample frames,
    each frame is FFT'd, and the magnitude-spectrum bins
    ``2 .. num_features + 1`` are kept (dropping the DC / near-DC bins and
    everything above the voice band).

    Args:
        data: 1-D numpy array of audio samples.
        frame_len: samples per frame; the default mirrors AUDIO_FRAME_LEN.
        num_features: spectral bins kept per frame; mirrors INPUT_FEATURE.

    Returns:
        float32 array of shape (n_frames, num_features).
    """
    # Truncate to a whole number of frames; the trailing remainder is dropped.
    data = data[: (data.size // frame_len * frame_len)]

    # Magnitude spectrum of each frame.
    frames = np.reshape(data, [-1, frame_len])
    spectrum = np.abs(fft(frames))

    # Drop the DC component and the high-frequency part.
    return spectrum[:, 2:num_features + 2].astype(np.float32)


def create_training_set (filename, class_index, valid_voice_start_idx, valid_voice_end_idx):
    '''
        Build one training sample from a labelled wav file.

        The audio is split into AUDIO_FRAME_LEN-sized segments and each one is
        FFT'd into a feature row, yielding a 2-D array whose rows have length
        INPUT_FEATURE and whose row axis is time.
        To make better use of the data, the sample is padded with noise frames
        so that along the time axis:
        1. there are TCN_SPAN - 1 noise frames before the valid speech frames
        2. there are TCN_SPAN noise frames after the valid speech frames
        3. the valid span is (end_frame - start_frame + 1) = non_blank_frame_num
        The padded input therefore has (2 * TCN_SPAN - 1 + non_blank_frame_num)
        frames.

        Label layout:
        The input is not padded during training, so the first (TCN_SPAN - 1)
        frame groups produce no label; the emitted label array has length
        (TCN_SPAN + non_blank_frame_num).  Indices (end_frame - start_frame)
        through TCN_SPAN (inclusive) are set to class_index, everything else is
        blank — i.e. the label is class_index only where the valid audio is
        fully covered by the TCN's receptive field.

        Args:
            filename: path of the wav file to load.
            class_index: integer class of the valid speech segment.
            valid_voice_start_idx: sample index where the speech starts.
            valid_voice_end_idx: sample index where the speech ends.

        Returns:
            dict with "data_for_processing" (float32, (frames, INPUT_FEATURE))
            and "label" (int64, (TCN_SPAN + non_blank_frame_num,)).
    '''

    # Convert sample indices to frame indices (inclusive on both ends).
    start_frame = valid_voice_start_idx // AUDIO_FRAME_LEN
    end_frame   = valid_voice_end_idx   // AUDIO_FRAME_LEN
    non_blank_frame_num = (end_frame - start_frame + 1)

    _, wav_data = wavfile.read(filename)

    data_for_processing = audio_to_windows_256(wav_data)
    origin_data_len =  data_for_processing.shape[0]

    # If the clip starts or ends with a noise frame, reuse that frame to pad
    # the audio out to the required length; otherwise fall back to a constant
    # frame.
    # NOTE(review): the original comment said "pad with 0" here, but the code
    # uses np.ones_like — confirm which is intended.
    if (start_frame > 0):
        noise_to_extend = data_for_processing[0]
    elif (end_frame < (origin_data_len - 1)):
        noise_to_extend = data_for_processing[-1]
    else:
        noise_to_extend = np.ones_like(data_for_processing[0])

    # Give the padding row a leading axis so np.tile can stack copies of it.
    noise_to_extend = noise_to_extend[np.newaxis]

    frame_num_insert_before_start = TCN_SPAN - 1 - start_frame
    frame_num_insert_after_end = TCN_SPAN - (origin_data_len - 1 - end_frame)

    # The first TCN_SPAN - 1 frame groups produce no label.
    label = np.ones(TCN_SPAN + non_blank_frame_num, dtype=np.int64) * BLANK_CLASS
    label[end_frame - start_frame : TCN_SPAN + 1] = class_index

    # Extend the input with the padding (noise) frames on both sides.
    data_for_processing = np.concatenate((np.tile(noise_to_extend, (frame_num_insert_before_start, 1)), data_for_processing,
                np.tile(noise_to_extend, (frame_num_insert_after_end, 1) )))

    return { "data_for_processing": data_for_processing, "label" : label }

def csv_record_to_data(cvs_record):
    """Turn one csv.DictReader row into a training sample.

    Expected columns: "FileName", "Class", "kFirst", "kLast" (speech start /
    end as raw sample indices).
    """
    valid_voice_start_idx = int(cvs_record["kFirst"])
    valid_voice_end_idx = int(cvs_record["kLast"])
    filename = cvs_record["FileName"]
    # BUG FIX: DictReader yields strings; "Class" was previously passed on
    # unconverted and later assigned into an int64 label array.  Convert it
    # like kFirst/kLast.
    class_index = int(cvs_record["Class"])
    return create_training_set(filename,
                               class_index,
                               valid_voice_start_idx,
                               valid_voice_end_idx)

def create_dataset(csv_file):
    """Read every row of *csv_file* (an index of labelled wav files) and
    convert each one into a training sample via csv_record_to_data."""
    with open(csv_file, mode='r') as csv_fd:
        return [csv_record_to_data(record) for record in csv.DictReader(csv_fd)]

def train_param():
    """Train the TCN on dataset_train.csv, save the pickled model to
    my_trained.pt and dump its parameters as a C header (param.h)."""
    net = Net(in_features = INPUT_FEATURE, out_features=OUTPUT_FEATURE)

    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    loss_func = torch.nn.CrossEntropyLoss()

    net.train()
    samples = create_dataset("dataset_train.csv")

    for epoch in range(EPOCH):
        print("Epoch: ", epoch)
        # Visit the samples in a fresh random order every epoch.
        random.shuffle(samples)
        for step, sample in enumerate(samples):
            # Add a leading batch axis of size 1.
            inputs = torch.from_numpy(sample["data_for_processing"][np.newaxis])
            labels = torch.from_numpy(sample["label"][np.newaxis])
            loss = loss_func(net(inputs), labels)
            if step % 50 == 0:
                print("Loss: ", loss.data.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    torch.save(net, "my_trained.pt")

    net.dump_param("param.h")

def eval_param():
    """Evaluate the trained model on dataset_eval.csv and print the per-frame
    accuracy over the non-blank label positions."""
    # torch.load unpickles the whole Net; the Net class is imported at module
    # level, which unpickling requires.  (The previous construction of a fresh
    # Net here was immediately overwritten and has been removed.)
    net = torch.load("my_trained.pt")
    net.eval()

    eval_dataset = create_dataset("dataset_eval.csv")

    total = 0.0
    right = 0.0

    for record in eval_dataset:
        data = torch.from_numpy(record["data_for_processing"][np.newaxis])
        target = record["label"]

        prediction = net(data)

        # Per-frame predicted class; the class axis is dim 1.
        prediction = ((F.softmax(prediction, dim=1)).data.numpy()).argmax(axis=1)

        # Only the non-blank positions are scored.
        eval_mask = (target != BLANK_CLASS)
        target = np.extract(eval_mask, target)
        prediction = np.extract(eval_mask, prediction)

        # BUG FIX: np.argwhere on a 1-D boolean array returns shape (k, 1),
        # whose .size is k — the old "argwhere(...).size / 2" therefore halved
        # the number of correct frames.  Count the matches directly.
        right += np.count_nonzero(prediction == target)
        total += target.size

    print("Validation result: {} / {} = {}".format(right, total, right / total))

# Test with a real piece of audio and plot the probability of each label.
def real_eval(f_name):
    """Run the trained net over one wav file and plot, below the waveform,
    the per-frame probability of every non-blank class.

    Args:
        f_name: path of the wav file to analyse.
    """
    # Load the pickled model (the Net class is imported at module level).
    net = torch.load("my_trained.pt")
    net.eval()

    _, wav_data = wavfile.read(f_name)

    data_for_processing = audio_to_windows_256(wav_data)
    prediction = net(torch.from_numpy(data_for_processing[np.newaxis]))
    prediction = F.softmax(prediction, dim=1)

    # The first prediction corresponds to the frame at the end of the TCN's
    # span, so skip the first (TCN_SPAN - 1) frames of raw audio to align the
    # waveform with the probability curves.
    origin_data = wav_data[AUDIO_FRAME_LEN * (TCN_SPAN - 1):]

    # Drop the batch axis: shape becomes (classes, time).
    prediction = prediction.data.numpy()[0]

    plt.subplot(211)
    plt.plot(origin_data)
    plt.subplot(212)
    # Drop the trailing "blank" row and transpose so each class is one curve.
    plt.plot(prediction[:-1].T)
    # BUG FIX: the old code passed the Line2D handles where legend *labels*
    # are expected; label each curve with its class name instead.
    plt.legend(folder_filter[:-1])
    plt.show()

if __name__ == "__main__":
    # Train the network, then report accuracy on the evaluation set.
    train_param()
    eval_param()

    # Optionally visualise the predictions for a wav file given on the
    # command line.
    if len(sys.argv) > 1:
        real_eval(sys.argv[1])
