import pickle
from abc import ABC

import torch.nn as nn
import torch
from tqdm import tqdm
# from warpctc_pytorch import CTCLoss
import config
import Levenshtein
import data_handler

logger = config.get_logger()


class BaseAsrModel(nn.Module):
    """Base class for ASR models: device bookkeeping, checkpoint loading and
    greedy CTC decoding shared by concrete models.

    Subclasses are expected to define ``self.vocabulary`` (index -> token)
    and ``self.blank`` (index of the CTC blank label); see GateConv.
    """

    def __init__(self, **model_config):
        # ``model_config`` (renamed from ``config`` to avoid shadowing the
        # imported ``config`` module) is kept so TrainableModel.save() can
        # persist the construction parameters alongside the weights.
        super(BaseAsrModel, self).__init__()
        self.config = model_config
        self.device = torch.device('cpu')
        self.to(self.device)

    def load(self,
             pretrain_param_path: str = 'F:\code\python\deeplearning\pythonProject\\ai\output\chatbot_pytorch\gxl_model\package_epoch560_masr.pt'):
        """Load model weights from a checkpoint package.

        The package is a dict (as written by TrainableModel.save()) with
        keys 'state_dict' and 'param'; only 'state_dict' is consumed here.
        NOTE(review): torch.load unpickles arbitrary objects — only load
        checkpoints from trusted sources. The default path is machine
        specific; callers on other machines must pass their own path.
        """
        package = torch.load(pretrain_param_path, map_location=torch.device('cpu'))
        self.load_state_dict(package['state_dict'])

    def to(self, device, *args, **kwargs):
        # Track the current device so batch tensors can be moved with
        # ``x.to(self.device)`` during training/testing.
        self.device = device
        return super().to(device, *args, **kwargs)

    def predict(self, wav_file_path):
        """Recognize a single wav file; implemented by subclasses."""
        raise NotImplementedError

    def forward(self, *args, **kwargs):
        """
        Inputs:
            spectrum: tensor (batch_size, time_size, freq_size)
            spectrum_len: tensor (batch_size,)
        Outputs:
            logits: tensor (batch_size, time_size, vocab_size)
            yp_len: tensor (batch_size,)
        """
        return super().forward(*args, **kwargs)

    #  Monkey-patch usage: swap this class's base so the training helpers in
    #  TrainableModel become available.
    #  NOTE(review): this mutates the *class*, so it affects every live
    #  instance of the class, not just ``self``.
    def to_train(self):
        self.__class__.__bases__ = (TrainableModel,)
        return self

    def decode(self, y_pre, yp_len):
        """Convert network output into recognized text/token strings.

        Inputs:
            y_pre: tensor (batch, seq_len, vocab)
            yp_len: tensor (batch,)
        Output:
            texts: list[str] of length batch
        """
        return self._default_decode(y_pre, yp_len)

    def _default_decode(self, y_pre, yp_lens):
        """Greedy CTC decode: pick a label per frame, then collapse repeats
        and drop blanks. Each emitted token is followed by a space.

        NOTE(review): ``topk(..., 2)`` followed by ``[:, :, -1]`` selects the
        *second* most likely label at each frame, not the argmax. Preserved
        as-is — confirm this is intentional before changing.
        """
        idxs = torch.topk(y_pre, 2, dim=-1)[1][:, :, -1]
        texts = []
        for idx, out_len in zip(idxs, yp_lens):
            tokens = []
            last = None
            for i in idx[:out_len]:
                tok = i.item()  # plain int; avoids tensor-vs-int `in` checks
                # CTC collapse: skip consecutive repeats and the blank label.
                if tok not in (last, self.blank):
                    tokens.append(self.vocabulary[tok])
                last = tok
            # join instead of quadratic string += in a loop
            texts.append("".join(t + " " for t in tokens))
        return texts


class TrainableModel(BaseAsrModel, ABC):
    """
    Functionality-extension class used only for training.
    Implements: save(), loss(), accuracy(), test(), fit().
    Loaded onto a model by calling the parent class's to_train() (monkey patch).
    """

    def __init__(self):
        super(TrainableModel, self).__init__()

    def save(self, path: str = None):
        """Persist model weights and construction parameters.

        When ``path`` is None a timestamped file under config.MODEL_DIR is
        used, computed at *call* time. (A literal default argument would
        freeze config.get_now() at class-definition time, so every default
        save would reuse the import-time timestamp.)
        """
        if path is None:
            path = config.MODEL_DIR + 'package_common_' + config.get_now() + ".pt"
        logger.info('save model to {}'.format(path))
        package = {'state_dict': self.state_dict(), 'param': self.config}
        torch.save(package, path)

    def loss(self, *pred_target):
        """Compute the CTC loss.

        Returns: scalar tensor, mean reduction."""
        return self._default_ctc_loss(*pred_target)

    @staticmethod
    def _default_ctc_loss(pred, pred_len, target, target_len):
        """
        pred: (batch_size, time_size, vocab_size) — log-probs expected by nn.CTCLoss
        target: (batch_size, time_size)
        """
        criterion = nn.CTCLoss(blank=0, reduction='mean')
        # criterion = CTCLoss(size_average=True)
        pred = pred.permute(1, 0, 2)  # -> (time, batch, vocab) as nn.CTCLoss requires
        return criterion(pred, target, pred_len, target_len)

    def accuracy(self, texts, targets, target_lens):
        """
        Compute the character error rate (CER).
        Inputs:
            texts: decoded strings, length batch_size
            targets: tensor (batch_size, time_size)
            target_lens: tensor (batch_size,)
        Output:
            float — per-sentence average of the per-character error rate
        """
        return self._default_ctc_accuracy(texts, targets, target_lens)

    def _default_ctc_accuracy(self, texts, targets, target_lens):
        """
        Character error rate via edit distance, averaged first over the
        characters of each sentence, then over the batch.
        """
        accuracy = 0.0
        for text, target, target_len in zip(texts, targets, target_lens):
            target_text = "".join(self.vocabulary[int(t)] for t in target[:target_len])
            # Levenshtein computes the edit distance between two strings;
            # int(target_len) keeps the running total a plain float rather
            # than a 0-dim tensor.
            accuracy += Levenshtein.distance(text, target_text) / int(target_len)
        return accuracy / len(texts)

    def test(self, test_idx_file_path: str, batch_size=64):
        """
        Run evaluation on a test/dev index file.
        Returns:
            float — CER averaged over batches of the test set
        """
        logger.info('start test...')
        self.eval()
        dataset = data_handler.MASRDataset(test_idx_file_path)
        dataloader = data_handler.MASRDataLoader(dataset, batch_size=batch_size, shuffle=False)
        step_num = len(dataloader)
        cer = 0
        for inputs, targets, inputs_len, target_len in tqdm(dataloader):
            inputs, targets, inputs_len, target_len = [x.to(self.device) for x in
                                                       [inputs, targets, inputs_len, target_len]]
            outputs = self.forward(inputs, inputs_len)
            text = self.decode(*outputs)
            cer = cer + self.accuracy(text, targets, target_len)
        self.train()
        return cer / step_num

    def fit(self,
            epochs=10,
            lr_rate=0.6,
            batch_size=10,
            momentum=0.8,
            grad_clip=0.123,
            sorta_grad=True,
            device=torch.device('cpu'),
            train_idx_file_path: str = config.PREHAND_SAVE_DIR + 'prehand_data_train.idx.jsonl',
            dev_idx_file_path: str = config.PREHAND_SAVE_DIR + 'prehand_data_dev.idx.jsonl',
            ):
        """
        Train the model. Saves model + training parameters every 20 epochs
        (including epoch 0); logs training loss and dev CER every epoch.
        With ``sorta_grad`` the first epoch uses length-sorted batches.
        """
        self.to(device)
        optimizer = torch.optim.SGD(self.parameters(), lr=float(lr_rate), momentum=float(momentum))
        # max(1, ...) keeps T_0 positive when epochs < 10 (T_0=0 is invalid
        # for CosineAnnealingWarmRestarts).
        # NOTE(review): the scheduler is stepped per *batch* while T_0 is in
        # epoch units — restarts happen much faster than T_0 epochs; confirm
        # this is intended.
        lr_schedule = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=max(1, int(epochs / 10)), T_mult=2)
        dataset = data_handler.MASRDataset(train_idx_file_path)
        dataloader_shuffle = data_handler.MASRDataLoader(dataset, batch_size=batch_size, shuffle=True)
        dataloader_sorta = data_handler.MASRDataLoader(dataset, batch_size=batch_size, shuffle=sorta_grad)
        step_num = len(dataloader_shuffle)
        logger.info('start training...')
        logger.info('use device: ' + str(device))
        for epoch in range(epochs):
            self.train()
            if sorta_grad and epoch < 1:
                dataloader = dataloader_sorta
            else:
                dataloader = dataloader_shuffle
            # Reset per epoch — previously the divided value leaked into the
            # next epoch's accumulation, corrupting every reported loss
            # after epoch 0.
            avg_loss = 0.0
            for inputs, targets, inputs_len, target_len in tqdm(dataloader, total=step_num):
                inputs, targets, inputs_len, target_len = [x.to(device) for x in
                                                           [inputs, targets, inputs_len, target_len]]
                optimizer.zero_grad()
                output, output_len = self.forward(inputs, inputs_len)
                loss = self.loss(output, output_len, targets, target_len)
                loss.backward()
                avg_loss += loss.item()
                if grad_clip:
                    nn.utils.clip_grad_norm_(self.parameters(), grad_clip)
                optimizer.step()
                lr_schedule.step()
            self.eval()
            avg_loss = avg_loss / step_num  # mean loss per batch this epoch
            cer_dev = self.test(dev_idx_file_path)
            logger.info('epoch: ' + str(epoch) + ' loss_on_train: ' + str(avg_loss) + ' cer_on_dev: ' + str(cer_dev))
            if epoch % 20 == 0:
                self.save(config.MODEL_DIR + f'package_epoch{epoch}_' + config.get_now() + ".pt")


class ConvBlock(nn.Module):
    """Conv1d -> GLU(dim=1) -> Dropout building block.

    GLU gates on the channel dimension and halves it, so the wrapped conv
    must emit twice the desired number of output channels. The conv weight
    is re-initialized with Kaiming normal init.
    """

    def __init__(self, conv, dropout):
        super(ConvBlock, self).__init__()
        self.conv = conv
        nn.init.kaiming_normal_(self.conv.weight)
        self.dropout = nn.Dropout(dropout)
        self.act = nn.GLU(1)

    def forward(self, X):
        # conv -> gate -> dropout, expressed as a single pipeline
        gated = self.act(self.conv(X))
        return self.dropout(gated)


class GateConv(BaseAsrModel, ABC):
    """Gated fully-convolutional acoustic model (MASR-style)."""

    def __init__(self, vocabulary, blank=0, name="masr"):
        """
        Inputs:
            vocabulary: str/sequence of all labels such that vocabulary[0] == ctc_blank
            blank: index of the ctc_blank label (= 0)
            name: str = 'masr'
        Architecture:
            input -> conv1d x 8 -> conv1d -> conv1d -> conv1d(weight_norm) -> output
            The first conv's input dim is 80 — the size of the fbank features.
            Each subsequent conv's input dim is half the previous conv's output
            dim, because the GLU(1) activation halves dim=1.
        """
        super().__init__(vocabulary=vocabulary, name=name, blank=blank)
        self.blank = blank
        self.vocabulary = vocabulary
        self.name = name
        output_units = len(vocabulary)
        modules = [ConvBlock(nn.Conv1d(80, 500, 48, 2, 97), 0.2)]
        for i in range(7):
            modules.append(ConvBlock(nn.Conv1d(250, 500, 7, 1), 0.3))
        modules.append(ConvBlock(nn.Conv1d(250, 2000, 32, 1), 0.5))
        modules.append(ConvBlock(nn.Conv1d(1000, 2000, 1, 1), 0.5))
        modules.append(torch.nn.utils.weight_norm(nn.Conv1d(1000, output_units, 1, 1)))
        self.cnn = nn.Sequential(*modules)

    def forward(self, x, lens):
        """
        Inputs:
            x: (batch_size, time_size, freq_size)
            lens: (batch_size,)
        Outputs:
            x: (batch_size, time_size, vocab_size) log-probabilities
            lens: (batch_size,) output lengths after the conv stack
        """
        x = x.permute(0, 2, 1)  # (batch_size, freq_size, time_size)
        x = self.cnn(x)
        x = x.permute(0, 2, 1)  # (batch_size, time_size, vocab_size)
        # Propagate sequence lengths through each Conv1d with the standard
        # output-length formula (all convs here use dilation 1). Relies on
        # self.modules() yielding the convs in the same order as
        # nn.Sequential applies them.
        for module in self.modules():
            if isinstance(module, nn.Conv1d):
                lens = (
                               lens - module.kernel_size[0] + 2 * module.padding[0]
                       ) // module.stride[0] + 1
        x = torch.nn.functional.log_softmax(x, dim=2)
        return x, lens

    def predict(self, path):
        """
        Input:
            path: path to a single wav audio file
        Output:
            text: the recognized string
        """
        self.eval()
        # no_grad: inference only — identical outputs, no autograd graph kept.
        with torch.no_grad():
            wav = data_handler.load_wav(path)
            spec = data_handler.spectrogram(wav)
            spec.unsqueeze_(0)  # (1, time_size, 80)
            out = self.cnn(spec.permute(0, 2, 1)).permute(0, 2, 1)  # (1, time_size, vocab_size)
            out = torch.nn.functional.log_softmax(out, dim=2)
            out_len = torch.tensor([out.size(1)])
            text = self.decode(out, out_len)
        self.train()
        return text[0]


if __name__ == '__main__':
    """Manual smoke test: load the vocabulary and weights, recognize one wav."""
    label_file_path = config.PREHAND_SAVE_DIR + 'token_list.pkl'
    # NOTE(review): pickle.load can execute arbitrary code — only load
    # trusted files. ``with`` closes the handle (the bare open() leaked it).
    with open(label_file_path, 'rb') as label_file:
        label_list = pickle.load(label_file)
    model = GateConv(label_list)
    # {"wav_path": "F:\\code\\python\\deeplearning\\pythonProject/ai/data/asr_pytorch/data_thchs30/data/A11_0.wav",
    # "transcript": "lv4 shi4 yang2 chun1 yan1 jing3 da4 kuai4 wen2 zhang1 de5 di3 se4 si4 yue4 de5 lin2 luan2
    # geng4 shi4 lv4 de5 xian1 huo2 xiu4 mei4 shi1 yi4 ang4 ran2"}
    model.load(pretrain_param_path='F:\code\python\deeplearning\pythonProject\\ai\output\chatbot_pytorch\gxl_model\package_epoch4020_masr.pt')
    print(model.predict('F:\code\python\deeplearning\pythonProject\\ai\data\\asr_pytorch\data_thchs30\data\A11_0.wav'))


