import math
import pickle

import scipy.io.wavfile as wav
import torch
import tqdm
import torchaudio

import matplotlib.pyplot as plt
import os

from torch import nn
from torch.utils.data import Dataset, DataLoader

import config
import numpy as np


def _show_a_pic_of_wav_data():
    """Plot the raw waveform of one sample wav file from the THCHS-30 corpus.

    Debug/visualization helper only; reads the file from ``config.DATADIRPATH``.
    """
    # Build the path with os.path.join so it works on both Windows and POSIX;
    # the old hard-coded "...\data\A2_0.wav" backslashes only worked on Windows.
    file_name = os.path.join(config.DATADIRPATH, "data_thchs30", "data", "A2_0.wav")
    # torchaudio.load returns (waveform, sample_rate) -- the original code
    # unpacked them in the wrong order and ended up plotting the int rate.
    wav_signal, fs = torchaudio.load(file_name)
    # waveform is (channels, samples); transpose so samples run along x.
    plt.plot(wav_signal.t())
    plt.show()


def hamming_func(x):
    """Hamming window function (exact coefficients 0.53836 / 0.46164).

    ``x`` is expected to be a 1-D numpy array of sample indices;
    ``len(x) - 1`` is used as the window span N - 1.
    """
    span = len(x) - 1
    return 0.53836 - 0.46164 * np.cos((2 * np.pi * x) / span)


def hann_func(x):
    """Hann (Hanning) window function over sample indices ``x`` (1-D array)."""
    phase = (2 * np.pi * x) / (len(x) - 1)
    return 0.5 - 0.5 * np.cos(phase)


def get_hamming_window(window_len: int = 400):
    """Return a Hamming window of ``window_len`` samples as a numpy array.

    Evaluates the Hamming formula at integer indices 0..window_len-1
    (equivalent to feeding a linspace of those indices to hamming_func).
    """
    indices = np.arange(window_len, dtype=float)
    return 0.53836 - 0.46164 * np.cos((2 * np.pi * indices) / (window_len - 1))


def _show_hamming_window():
    """Plot a 400-point Hamming window (visual sanity check)."""
    sample_points = np.linspace(start=0, stop=399, num=400)
    window = hamming_func(sample_points)
    plt.plot(window)
    plt.show()


def compute_fbank(filepath, mel_num=100):
    """Load a wav file and compute Kaldi-style log-mel filterbank features.

    NOTE(review): the old docstring described manual framing at "fs=1600";
    this actually delegates everything to torchaudio's kaldi fbank, using
    the file's own sample rate.

    :param filepath: path of the wav file to load
    :param mel_num: number of mel filterbank bins (default 100)
    :return: 2-D tensor, frames along dim 0 and mel bins along dim 1
             (callers slice dim 0 by frame count and imshow its transpose)
    """
    waveform, sample_rate = torchaudio.load(filepath)
    fbank = torchaudio.compliance.kaldi.fbank(waveform=waveform, sample_frequency=sample_rate, num_mel_bins=mel_num)
    return fbank


def show_time_domain_signal(filepath: str):
    """Display the fbank feature matrix of a wav file as an image.

    NOTE(review): despite the name, this renders the mel-filterbank
    (time-frequency) representation, not the raw time-domain signal.
    """
    features = compute_fbank(filepath)
    plt.imshow(features.T, origin='lower')
    plt.show()


def get_wav_label_filename_list(datadir: str):
    """Walk ``datadir`` and collect paired (wav, transcript) file paths.

    THCHS-30 stores the transcript of ``X.wav`` in ``X.wav.trn``, so each
    label path is simply the wav path with ``.trn`` appended.

    :param datadir: root directory to search recursively
    :return: (wav_path_list, label_path_list), index-aligned pairs
    """
    wav_list = []
    label_list = []
    for root, _dirs, files in os.walk(datadir):
        for file in files:
            # str.endswith accepts a tuple: covers both .wav and .WAV at once.
            if file.endswith((".wav", ".WAV")):
                wav_path = os.path.join(root, file)
                wav_list.append(wav_path)
                label_list.append(wav_path + '.trn')
    # Sanity-check the pairing with splitext instead of split('.'):
    # the old split('.')[0] broke on any path containing a dot (e.g. "./data").
    for wav_path, label_path in zip(wav_list, label_list):
        assert os.path.splitext(label_path)[0] == wav_path
    return wav_list, label_list


class PingyinVocabTokenizer:
    SOD_TOKEN = "<SOD>"
    EOD_TOKEN = "<EOD>"
    # UNK_TOKEN = "<UNK>"
    PAD_TOKEN = "<PAD>"
    BLANK_TOKEN = "_BLANK_"
    SOD_ID = 0
    EOD_ID = 1
    UNK_ID = 2
    PAD_ID = 3
    BLANK_ID = 4

    def __init__(self, logger=config.get_logger(), tokenizer=lambda x: [y for y in x.strip().split(' ')]):
        self.vocab_set = set()
        self._token_to_id = {}
        self._id_to_token = {}
        self.logger = logger
        self.tokenizer = tokenizer

    def load_vocab(self, vocab_file_path=config.VOCAB_DIRPATH + '/vocab_dict_pinyin.pkl'):
        """加载已保存的字典"""
        with open(vocab_file_path, "rb") as f:
            self.vocab_set = pickle.load(f)
        self._from_set_build_dict()

    def build_vocab(self, data_dir: str = config.DATADIRPATH,
                    vocab_file_path=config.VOCAB_DIRPATH + '/vocab_dict_pinyin.pkl'):
        """构建字典"""
        _, label_file_list = get_wav_label_filename_list(data_dir)
        self.logger.info('start build vocab')
        total = len(label_file_list)
        for filename in tqdm.tqdm(label_file_list, total=total):
            with open(filename, 'r', encoding='utf-8') as f:
                lines = f.readlines()
                pinyin_line = lines[1]
                self.vocab_set.update(self.tokenizer(pinyin_line))
        self._from_set_build_dict()
        with open(vocab_file_path, "wb") as f:
            self.logger.info('build vocab success! ,saved in: ' + vocab_file_path)
            pickle.dump(self.vocab_set, f)

    def _from_set_build_dict(self):
        self._token_to_id = {token: id + 4 for id, token in enumerate(self.vocab_set)}
        self._token_to_id[self.SOD_TOKEN] = self.SOD_ID
        self._token_to_id[self.EOD_TOKEN] = self.EOD_ID
        # self._token_to_id[self.UNK_TOKEN] = self.UNK_ID
        self._token_to_id[self.PAD_TOKEN] = self.PAD_ID
        self._token_to_id[self.BLANK_TOKEN] = self.BLANK_ID
        self._id_to_token = {token_id: token for token, token_id in self._token_to_id.items()}

    def __len__(self):
        return len(self._token_to_id)

    def encode(self, x):
        """x: str, list, torch.Tensor, 只能是一维的token_list
        return -> id_list"""
        if len(self.vocab_set) == 0:
            self.logger.warning("vocab is empty!")
            return
        if isinstance(x, str) and self._token_to_id.get(x, None):
            return [self._token_to_id[x]]  # 如果为单个token
        assert isinstance(x, list | torch.Tensor | str)
        if isinstance(x, str):
            x = self.tokenizer(x)
        if len(x) == 0:
            return []
        elif isinstance(x, list):
            assert isinstance(x[0], str)
            return [self._token_to_id.get(token, self.UNK_ID) for token in x]
        elif isinstance(x, torch.Tensor):
            assert x.dim() == 1
            return [self._token_to_id.get(token.item(), self.UNK_ID) for token in x]

    def decode(self, x: int | list | torch.Tensor):
        """解码，输入int, int_list, int_tensor，只支持一维ids
        return -> token list"""
        if len(self.vocab_set) == 0:
            self.logger.warning("vocab is empty!")
            return
        if isinstance(x, int):
            return [self._id_to_token.get(x, self.UNK_TOKEN)]
        if isinstance(x, list):
            assert isinstance(x[0], int)
            return [self._id_to_token.get(token_id, self.UNK_TOKEN) for token_id in x]
        elif isinstance(x, torch.Tensor):
            assert x.dim() == 1
            return [self._id_to_token.get(token_id, self.UNK_TOKEN) for token_id in x]

    def show_token_id(self):
        for token, id in self._token_to_id.items():
            print(token + ' --- ' + str(id))


def prepare_dataset_to_list(datadir: str = config.DATADIRPATH,
                            save_file_path=config.PREHAND_DATA_DIRPATH + 'prehand_listdata_tuple.pkl',
                            logger=None):
    """Load (or build and cache) preprocessed fbank features and label ids.

    On first run, extracts fbank features from every wav and encodes every
    transcript's pinyin line, then pickles the pair of lists; later runs
    load the cached pickle instead.

    :param datadir: root directory containing wav + .trn files
    :param save_file_path: pickle cache path for the processed data
    :param logger: optional logger; defaults lazily to config.get_logger()
    :return: (wav_data_list, label_data_list)
    """
    if logger is None:
        logger = config.get_logger()
    if os.path.exists(save_file_path):
        logger.info('预处理数据已存在，加载已存在数据')
        # Context manager so the cache file handle is closed promptly --
        # the old pickle.load(open(...)) leaked the handle.
        with open(save_file_path, 'rb') as f:
            wav_data_list, label_data_list = pickle.load(f)
        return wav_data_list, label_data_list
    wav_file_list, label_file_list = get_wav_label_filename_list(datadir)
    tokenizer = PingyinVocabTokenizer()
    # NOTE(review): build_vocab uses its own default data dir, not ``datadir``
    # -- confirm this is intended when datadir is overridden.
    tokenizer.build_vocab()
    wav_data_list = []
    label_data_list = []
    total = len(wav_file_list)
    logger.info('start prehand data...')
    for wav_file, label_file in tqdm.tqdm(zip(wav_file_list, label_file_list), total=total):
        fbank = compute_fbank(wav_file)
        # The model has three max-poolings, so the frame count must be
        # divisible by 8; drop the trailing remainder frames.
        fbank = fbank[:fbank.shape[0] // 8 * 8, :]
        wav_data_list.append(fbank)
        with open(label_file, 'r', encoding='utf-8') as f:
            # Line index 1 of the .trn file is the pinyin transcription.
            label_text = f.readlines()[1]
            label_data_list.append(tokenizer.encode(label_text))
    # Make sure the cache directory exists before writing.
    os.makedirs(os.path.dirname(save_file_path) or '.', exist_ok=True)
    with open(save_file_path, 'wb') as f:
        logger.info('数据处理完毕，存放中...')
        pickle.dump((wav_data_list, label_data_list), f)
        logger.info('数据预存放完毕，存放至：' + save_file_path)
    return wav_data_list, label_data_list


def truncate_pad_for_tensor(line: torch.Tensor, num_steps, padding_token):
    """Truncate or pad ``line`` along dim 0 to exactly ``num_steps`` rows.

    Dimension 0 is treated as the sequence dimension; all trailing
    dimensions are preserved as-is.

    >>> truncate_pad_for_tensor(torch.ones(5, 2, 3, 2), 7, -1).shape
    torch.Size([7, 2, 3, 2])
    """
    seq_len = line.shape[0]
    if seq_len >= num_steps:
        # Already long enough: simply truncate.
        return line[:num_steps]
    # Build a filler block matching the trailing dims, then append it.
    tail_shape = (num_steps - seq_len,) + tuple(line.shape[1:])
    tail = torch.full(tail_shape, padding_token)
    return torch.cat((line, tail), dim=0)


class ASRDataset(Dataset):
    """Wraps parallel lists of fbank tensors and label-id lists as a Dataset."""

    def __init__(self, wav_data_list, label_data_list):
        super().__init__()
        self.wav_data_list = wav_data_list
        self.label_data_list = label_data_list

    def __len__(self):
        # One sample per wav clip.
        return len(self.wav_data_list)

    def __getitem__(self, index):
        # Detached copy of the features so callers cannot mutate the cache.
        features = self.wav_data_list[index].clone().detach()
        labels = torch.tensor(self.label_data_list[index])
        return features, labels


from torch.nn.utils.rnn import pad_sequence


def collate_fn(batch):
    """Collate variable-length (features, labels) pairs into padded batches.

    :param batch: list of (data, target) tuples; both elements are tensors
                  whose first dimension varies per sample
    :return: (padded_features, feature_lengths, padded_targets, target_lengths)
             e.g. shapes like (B, max_frames, mels) and (B, max_label_len)
    """
    features, targets = zip(*batch)
    feature_lengths = torch.tensor([len(seq) for seq in features])
    target_lengths = torch.tensor([len(seq) for seq in targets])
    # Features padded with -100; labels padded with the tokenizer's PAD id.
    # NOTE(review): verify downstream masks/ignores the -100 feature padding.
    padded_features = pad_sequence(features, batch_first=True, padding_value=-100)
    padded_targets = pad_sequence(targets, batch_first=True,
                                  padding_value=PingyinVocabTokenizer.PAD_ID)
    return padded_features, feature_lengths, padded_targets, target_lengths


# Build a DataLoader wired up with the custom collate_fn above.
def get_iter():
    """Return a shuffling DataLoader (batch_size=2) over the prepared data."""
    wav_data, label_data = prepare_dataset_to_list()
    dataset = ASRDataset(wav_data, label_data)
    return DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=collate_fn)
def get_dataset():
    """Return the prepared ASRDataset (without DataLoader wrapping)."""
    return ASRDataset(*prepare_dataset_to_list())


def get_loss():
    """Return a mean-reduced CTC loss using the tokenizer's blank id."""
    return nn.CTCLoss(blank=PingyinVocabTokenizer.BLANK_ID, reduction='mean')


if __name__ == '__main__':
    # Quick manual check: build the vocabulary and report its size.
    vocab = PingyinVocabTokenizer()
    vocab.build_vocab()
    print(len(vocab))
