# -*- coding: utf-8 -*-

import os
from collections import Counter

import numpy as np
import scipy.io.wavfile as wav
from python_speech_features import mfcc

from asr_config import Config
import random

def get_wavs_lables(train_split=0.9):
    """
    Collect wav file paths and their labels, then split into train/test sets.

    Paths and labels come from the configured data directory / label file.
    The shuffle uses a fixed seed so the same split is reproduced every run.
    The resulting file lists are also written to wavfiles_train.txt and
    wavfiles_test.txt for later auditing.

    :param train_split: fraction of samples assigned to the training set
    :return: ((train_wav_files, train_text_labels),
              (test_wav_files, test_text_labels))
    """
    conf = Config()
    wav_files, text_labels = do_get_wavs_lables(conf.get("FILE_DATA").wav_path,
                                                conf.get("FILE_DATA").label_file)

    # Shuffle paths and labels together with a fixed seed so the partition
    # is deterministic across runs.
    random.seed(1)
    combined = list(zip(wav_files, text_labels))
    random.shuffle(combined)
    if combined:  # guard: zip(*[]) cannot be unpacked into two targets
        wav_files[:], text_labels[:] = zip(*combined)

    # Index at which the training set ends and the test set begins.
    split_point = int(len(wav_files) * train_split)

    train_wav_files = wav_files[:split_point]
    train_text_labels = text_labels[:split_point]
    test_wav_files = wav_files[split_point:]
    test_text_labels = text_labels[split_point:]

    print("训练集 wav:", len(train_wav_files), "/ label:", len(train_text_labels), end='; ')
    print("测试集 wav:", len(test_wav_files), "/ label:", len(test_text_labels))

    # Persist the split; explicit utf-8 avoids depending on the platform's
    # default encoding for non-ASCII path components.
    with open('wavfiles_train.txt', 'w', encoding='utf-8') as f:
        f.writelines(wav_file + '\n' for wav_file in train_wav_files)
    with open('wavfiles_test.txt', 'w', encoding='utf-8') as f:
        f.writelines(wav_file + '\n' for wav_file in test_wav_files)

    return (train_wav_files, train_text_labels), (test_wav_files, test_text_labels)


def do_get_wavs_lables(wav_path, label_file):
    """
    Walk *wav_path* for wav files and pair them with labels from *label_file*.

    Only wav files of at least 240000 bytes that have an entry in the label
    file (keyed by the file's base name without extension) are returned.

    :param wav_path: directory tree to search for .wav/.WAV files
    :param label_file: text file with lines of the form "<id> <label text>"
    :return: (wav_file_paths, labels) — two parallel lists
    """
    wav_files = []
    for dirpath, _dirnames, filenames in os.walk(wav_path):
        for filename in filenames:
            if not filename.endswith(('.wav', '.WAV')):
                continue
            filename_path = os.sep.join([dirpath, filename])
            # Skip clips that are too small to be useful training samples.
            if os.stat(filename_path).st_size < 240000:
                continue
            wav_files.append(filename_path)

    labels_dict = {}
    with open(label_file, 'rb') as f:
        for line in f:
            line = line.strip(b'\n')
            # Split once (the old code split the same line twice) and skip
            # malformed lines that lack the "<id> <text>" form instead of
            # raising IndexError.
            parts = line.split(b' ', 1)
            if len(parts) != 2:
                continue
            label_id, label_text = parts
            labels_dict[label_id.decode('ascii')] = label_text.decode('utf-8')

    labels = []
    new_wav_files = []
    for wav_file in wav_files:
        # The label key is the file name without its extension.
        wav_id = os.path.basename(wav_file).split('.')[0]
        if wav_id in labels_dict:
            labels.append(labels_dict[wav_id])
            new_wav_files.append(wav_file)

    return new_wav_files, labels


def create_dict(text_labels):
    """
    Build a character-level vocabulary from the label texts.

    :param text_labels: iterable of label strings
    :return: (words_size, words, word_num_map) where *words* is the sorted
             list of unique characters and *word_num_map* maps each character
             to its index in *words*
    """
    # Counter.update consumes each label's characters directly, avoiding the
    # intermediate all_words list (quadratic `+=` growth) of the old version.
    counter = Counter()
    for label in text_labels:
        counter.update(label)
    words = sorted(counter)
    words_size = len(words)
    word_num_map = dict(zip(words, range(words_size)))
    print('字表大小:', words_size)

    return words_size, words, word_num_map


def next_batch(start_idx=0,
               batch_size=1,
               n_input=None,
               n_context=None,
               labels=None,
               wav_files=None,
               word_num_map=None):
    """
    Fetch one batch of samples starting at *start_idx*.

    :param start_idx: index of the first sample in the batch
    :param batch_size: number of samples per batch
    :param n_input: MFCC feature dimension per frame
    :param n_context: number of context frames on each side
    :param labels: full list of label strings
    :param wav_files: full list of wav file paths (parallel to labels)
    :param word_num_map: char -> index vocabulary map
    :return: (next_start_idx, audio_features, audio_features_len,
              sparse_labels, batch_wav_files); next_start_idx is -1 once
              the dataset is exhausted
    """
    filesize = len(labels)
    end_idx = min(filesize, start_idx + batch_size)
    idx_list = range(start_idx, end_idx)
    txt_labels = [labels[i] for i in idx_list]
    # Use a separate name for the batch slice instead of shadowing the
    # wav_files parameter.
    batch_wav_files = [wav_files[i] for i in idx_list]
    audio_features, audio_features_len, text_vector, text_vector_len = get_audio_mfcc_features(None,
                                                                                               batch_wav_files,
                                                                                               n_input,
                                                                                               n_context,
                                                                                               word_num_map,
                                                                                               txt_labels)

    start_idx += batch_size
    # Signal end-of-epoch with -1 so the caller knows to reshuffle/restart.
    if start_idx >= filesize:
        start_idx = -1

    # BUGFIX: get_audio_mfcc_features already pads the batch to a common
    # length and returns the TRUE per-sample lengths. The old code padded a
    # second time here, which overwrote those true lengths with the uniform
    # padded length and broke downstream CTC sequence-length handling.
    sparse_labels = sparse_tuple_from(text_vector)

    return start_idx, audio_features, audio_features_len, sparse_labels, batch_wav_files


def get_audio_mfcc_features(txt_files, wav_files, n_input, n_context, word_num_map, txt_labels=None):
    """
    Load MFCC features for a batch of wav files and vectorize their labels.

    :param txt_files: optional list of label FILE paths; when given it takes
                      precedence over *txt_labels*
    :param wav_files: wav file paths, parallel to the labels
    :param n_input: MFCC feature dimension per frame
    :param n_context: number of context frames on each side
    :param word_num_map: char -> index vocabulary map
    :param txt_labels: optional list of label STRINGS (used when txt_files is None)
    :return: (audio_features, audio_features_len, text_vector, text_vector_len);
             features and text vectors are zero-padded to the batch maximum,
             the *_len arrays hold the true pre-padding lengths
    """
    if txt_files is not None:
        txt_labels = txt_files

    raw_audio_features = []
    raw_audio_lengths = []
    raw_text_vectors = []
    raw_text_lengths = []

    for txt_obj, wav_file in zip(txt_labels, wav_files):
        # Audio -> MFCC(+context) feature matrix, float32 for the model.
        audio_data = audiofile_to_input_vector(wav_file, n_input, n_context)
        audio_data = audio_data.astype('float32')
        raw_audio_features.append(audio_data)
        raw_audio_lengths.append(len(audio_data))

        # txt_obj is either a label file path or the label text itself.
        if txt_files is not None:
            target = trans_text_ch_to_vector(txt_obj, word_num_map)
        else:
            target = trans_text_ch_to_vector(None, word_num_map, txt_obj)
        raw_text_vectors.append(target)
        raw_text_lengths.append(len(target))

    # Pad audio features to the longest sample in the batch.
    if raw_audio_features:
        audio_features, audio_features_len = pad_sequences(raw_audio_features)
    else:
        audio_features = np.array([])
        audio_features_len = np.array([])

    # Pad text vectors with 0 to the longest label in the batch.
    # NOTE(review): 0 is also a valid vocabulary index, so consumers must rely
    # on text_vector_len (the true lengths) rather than the padding value.
    if raw_text_vectors:
        max_text_len = max(len(text) for text in raw_text_vectors)
        padded_text_vectors = []
        for text in raw_text_vectors:
            padded = text[:max_text_len]
            padded += [0] * (max_text_len - len(text))
            padded_text_vectors.append(padded)
        text_vector = np.array(padded_text_vectors)
    else:
        text_vector = np.array([])

    audio_features_len = np.asarray(audio_features_len)
    text_vector_len = np.asarray(raw_text_lengths)

    return audio_features, audio_features_len, text_vector, text_vector_len

def sparse_tuple_from(sequences, dtype=np.int32):
    """
    Convert a list of dense sequences into a sparse-tensor triple.

    :param sequences: iterable of sequences (e.g. label index lists)
    :param dtype: dtype of the values array
    :return: (indices, values, shape) suitable for tf.SparseTensor; indices
             has shape [nnz, 2], shape is [batch, max_seq_len]
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n] * len(seq), range(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    # Guard against an empty batch / all-empty sequences: .max(0) raises on
    # an empty array, and an empty asarray loses the [nnz, 2] shape.
    if indices.size > 0:
        max_len = indices.max(0)[1] + 1
    else:
        indices = indices.reshape(0, 2)
        max_len = 0
    shape = np.asarray([len(sequences), max_len], dtype=np.int64)

    return indices, values, shape


def trans_text_ch_to_vector(txt_file, word_num_map, txt_label=None):
    """
    Convert Chinese text (from a file or an in-memory string) to indices.

    Characters missing from the vocabulary map to len(word_num_map) — one
    past the last valid index — acting as an out-of-vocabulary marker.

    :param txt_file: optional path to a gb2312 label file; takes precedence
                     over *txt_label* when not None
    :param word_num_map: char -> index vocabulary map
    :param txt_label: label text used when *txt_file* is None
    :return: list of vocabulary indices
    """
    words_size = len(word_num_map)
    if txt_file is not None:
        txt_label = get_ch_lable(txt_file)
    return [word_num_map.get(word, words_size) for word in txt_label]


def get_ch_lable(txt_file):
    """
    Read a gb2312-encoded label file and return its contents as a string.

    Reads the whole file in one pass instead of concatenating decoded lines
    with `+` (the old approach was quadratic in the number of lines). The
    result is identical: gb2312 multi-byte sequences never contain 0x0A, so
    per-line and whole-file decoding agree.

    :param txt_file: path to the gb2312-encoded label file
    :return: decoded file contents, newlines preserved
    """
    with open(txt_file, 'rb') as f:
        return f.read().decode('gb2312')


def trans_tuple_to_texts_ch(tuple, words):
    """
    Decode a sparse (indices, values, shape) triple back into text strings.

    :param tuple: sparse triple as produced by sparse_tuple_from
    :param words: index -> character table (index 0 decodes to a space)
    :return: list of decoded strings, one per sequence in the batch
    """
    indices, values = tuple[0], tuple[1]
    # One output slot per sequence in the batch (shape[0] is the batch size).
    results = [''] * tuple[2][0]
    for (row, _col), code in zip(indices, values):
        ch = ' ' if code == 0 else words[code]
        results[row] += ch
    return results


def trans_array_to_text_ch(value, words):
    """
    Convert a sequence of vocabulary indices back into a text string.

    The backtick character stands in for a space in the vocabulary, so it is
    translated back to a real space on output.

    :param value: iterable of vocabulary indices
    :param words: index -> character table
    :return: decoded string
    """
    # str.join is linear; the old per-character `+=` loop was quadratic.
    results = ''.join(words[v] for v in value)
    return results.replace('`', ' ')


def audiofile_to_input_vector(audio_filename, n_input, n_context):
    """
    Convert a wav file into an MFCC feature matrix with stacked context.

    Each output frame is [past n_context frames | current frame | future
    n_context frames], flattened, so each row has
    n_input + 2 * n_input * n_context features (e.g. 39 + 2*39*9 = 741).
    Missing context at the clip edges is zero-padded. The whole matrix is
    normalized to zero mean / unit variance before returning.

    :param audio_filename: path to the wav file to load
    :param n_input: MFCC feature dimension per frame (13, 26, or 39 select
                    base / +delta / +delta-delta; other values fall through
                    to a plain n_input-cepstrum MFCC)
    :param n_context: number of context frames stacked on each side
    :return: feature matrix of shape [time_steps, n_input * (2*n_context + 1)]
    """
    # Load the wav file (fs is the sample rate, audio the raw samples).
    fs, audio = wav.read(audio_filename)

    # Compute MFCC features so that the per-frame dimension equals n_input.
    if n_input == 13:
        # Base MFCC coefficients only.
        orig_inputs = mfcc(audio, samplerate=fs, numcep=13)
    elif n_input == 26:
        # Base MFCC + first-order differences (deltas).
        orig_inputs = mfcc(audio, samplerate=fs, numcep=13)
        # First-order delta; prepend duplicates row 0 so the length matches.
        delta_features = np.diff(orig_inputs, axis=0, prepend=orig_inputs[0:1])
        orig_inputs = np.hstack([orig_inputs, delta_features])
    elif n_input == 39:
        # Base MFCC + deltas + delta-deltas.
        orig_inputs = mfcc(audio, samplerate=fs, numcep=13)
        # First-order delta.
        delta_features = np.diff(orig_inputs, axis=0, prepend=orig_inputs[0:1])
        # Second-order delta (delta of the deltas).
        delta_delta_features = np.diff(delta_features, axis=0, prepend=delta_features[0:1])
        orig_inputs = np.hstack([orig_inputs, delta_features, delta_delta_features])
    else:
        # Fallback: plain MFCC with n_input cepstral coefficients.
        orig_inputs = mfcc(audio, samplerate=fs, numcep=n_input)

    orig_inputs = orig_inputs[::2]  # keep every second frame (halves the frame rate / sequence length)

    # Total per-row feature width: current frame plus context on both sides.
    total_features = n_input + 2 * n_input * n_context
    train_inputs = np.zeros((orig_inputs.shape[0], total_features))

    # Build one stacked-context row per (downsampled) time step.
    time_slices = range(orig_inputs.shape[0])

    for time_slice in time_slices:
        # Past context: up to n_context frames before the current one.
        past_start = max(0, time_slice - n_context)
        past_end = time_slice
        past_data = orig_inputs[past_start:past_end]

        # Not enough history near the start of the clip: pad with zeros
        # in FRONT so the current frame stays aligned.
        if len(past_data) < n_context:
            pad_shape = (n_context - len(past_data), n_input)
            pad = np.zeros(pad_shape)
            past_features = np.vstack((pad, past_data)) if len(past_data) > 0 else pad
        else:
            past_features = past_data

        # The current frame itself.
        now_data = orig_inputs[time_slice]

        # Future context: up to n_context frames after the current one.
        future_start = time_slice + 1
        future_end = min(orig_inputs.shape[0], time_slice + n_context + 1)
        future_data = orig_inputs[future_start:future_end]

        # Not enough lookahead near the end of the clip: pad with zeros BEHIND.
        if len(future_data) < n_context:
            pad_shape = (n_context - len(future_data), n_input)
            pad = np.zeros(pad_shape)
            future_features = np.vstack((future_data, pad)) if len(future_data) > 0 else pad
        else:
            future_features = future_data[:n_context]

        # Flatten each part before concatenation.
        past_flat = past_features.flatten()  # n_context * n_input values
        now_flat = now_data  # n_input values
        future_flat = future_features.flatten()  # n_context * n_input values

        # Combined row: past | now | future = total_features values.
        combined_features = np.concatenate([past_flat, now_flat, future_flat])

        # Store the stacked row at its time step.
        train_inputs[time_slice] = combined_features

    # Normalize over the WHOLE matrix (global mean/std, not per feature).
    train_inputs = (train_inputs - np.mean(train_inputs)) / np.std(train_inputs)
    return train_inputs


def pad_sequences(sequences, maxlen=None, dtype=np.float32,
                  padding='post', truncating='post', value=0.):
    """
    Pad/truncate a list of sequences to a common length.

    'post' pads or truncates at the end of a sequence, 'pre' at the start.

    :param sequences: list of array-likes, each of shape [len, ...]
    :param maxlen: target length; defaults to the longest sequence
    :param dtype: dtype of the returned array
    :param padding: 'post' or 'pre' — where padding is inserted
    :param truncating: 'post' or 'pre' — which end is cut off
    :param value: padding value
    :return: (padded_array, original_lengths)
    :raises ValueError: on an unknown padding/truncating mode or when a
                        sequence's trailing shape differs from the first's
    """
    sequences_each_len = np.asarray([len(s) for s in sequences], dtype=np.int64)

    nb_samples = len(sequences)
    # Guard: np.max on an empty length array raises; return an empty batch.
    if nb_samples == 0:
        return np.zeros((0, 0), dtype=dtype), sequences_each_len
    if maxlen is None:
        maxlen = np.max(sequences_each_len)

    # Infer the trailing (per-step) feature shape from the first non-empty
    # sequence; empty sequences are padded entirely with `value`.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break

    x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" not understood' % truncating)

        # Every sequence must share the first sequence's per-step shape.
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x, sequences_each_len


if __name__ == "__main__":
    # BUGFIX: get_wavs_lables accepts only train_split and reads the data
    # paths from Config internally. The old call passed wav_path/label_file
    # positionally, which bound wav_path to train_split and raised TypeError
    # for the extra argument.
    get_wavs_lables()
    print()
