import torch
import numpy as np
import sys
import os
import codecs
import re
from tqdm import tqdm
from config import ModelConfig
from torch.utils.data.dataset import Dataset
import data_prepare_by_name
import kaldiio
from sklearn.model_selection import train_test_split
from torch import Tensor, tensor
import math


class W2LData(Dataset):
    """Map-style dataset pairing pre-loaded feature arrays with labels.

    `data` and `label` are parallel sequences indexed by utterance; items
    are converted to torch.Tensor lazily in __getitem__.
    """

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __len__(self):
        """Number of utterances; enables len(dataset)."""
        return len(self.data)

    def __getitem__(self, index):
        # Convert on access so the backing storage can stay numpy/list.
        return torch.Tensor(self.data[index]), torch.Tensor(self.label[index])


class W2LData_byname(Dataset):
    """Dataset over a list of utterance entries, returned unmodified."""

    def __init__(self, utt_list):
        # utt_list is any indexable sequence of utterance records.
        self.utt_list = utt_list

    def __len__(self):
        return len(self.utt_list)

    def __getitem__(self, index):
        return self.utt_list[index]


class ChunkData(Dataset):
    """Dataset wrapping a list of scp (feats, labels) chunk pairs.

    Entries may be None placeholders; they are handed back unchanged so the
    collate function can decide how to treat them.
    """

    def __init__(self, scp_list):
        self.scp_list = scp_list

    def __len__(self):
        return len(self.scp_list)

    def __getitem__(self, index):
        # int() tolerates tensor/float indices coming from samplers; a None
        # entry simply passes through, matching the old explicit check.
        return self.scp_list[int(index)]


def collate_fn(batch_data):
    """Collate (utt_id, {"feats": path, "labels": path}) pairs into tensors.

    Per-utterance feature/label files (.txt via np.loadtxt or .npy via
    np.load) are concatenated along the frame axis, truncated so the total
    frame count divides the batch size, then reshaped to
    feats (batch, frames_per_utt, feat_dim) and labels (batch, frames_per_utt).

    :param batch_data: iterable of (utt_id, dict) pairs
    :return: (feats Tensor, labels Tensor)
    :raises ValueError: on a feature file with an unsupported extension
    """
    print("***********get batch_data, size {}***********\n".format(len(batch_data)))
    batch_size = len(batch_data)
    batch_feats, batch_label, batch_len = [], [], 0
    # Read every scp entry of this batch and merge into one frame list.
    for utt_id, utt_dict in batch_data:
        feat_file = utt_dict["feats"]
        labels_file = utt_dict["labels"]
        if feat_file.endswith("txt"):
            feats = np.loadtxt(feat_file, dtype=float)
            label = np.loadtxt(labels_file, dtype=int)
        elif feat_file.endswith("npy"):
            feats = np.load(feat_file)
            label = np.load(labels_file)
        else:
            # Previously fell through with feats/label undefined (or stale
            # from the prior iteration) -- fail loudly instead.
            raise ValueError("unsupported feature file format: {}".format(feat_file))
        # Keep only frames that have both a feature row and a label.
        min_len = min(feats.shape[0], label.shape[0])
        batch_feats.extend(feats[:min_len])
        batch_label.extend(label[:min_len])
        batch_len += min_len

    # Report the merged batch, not the last utterance's shape.
    print('Merged batch: {} frames, {} labels\n'.format(len(batch_feats), len(batch_label)))

    # Truncate so the frame total is evenly divisible across utterances.
    batch_len = (batch_len // batch_size) * batch_size

    batch_feats = np.asarray(batch_feats)[:batch_len]
    batch_label = np.asarray(batch_label, dtype=np.int32)[:batch_len]

    feat_dim = batch_feats.shape[-1]
    batch_feats = np.reshape(batch_feats, (batch_size, -1, feat_dim))
    batch_label = np.reshape(batch_label, (batch_size, -1))

    return torch.Tensor(batch_feats), torch.Tensor(batch_label)

def collate_fn_cnn(batch_data):
    """Collate per-utterance txt feature/label files for a CNN front end.

    Returns (feats, labels): feats shaped (batch, feat_dim, time) and labels
    taken every second frame after the time axis is made odd by repeating
    the final label column.
    """
    n_utts = len(batch_data)
    all_feats, all_labels, total_frames = [], [], 0

    for utt_id, utt_dict in batch_data:
        feats = np.loadtxt(utt_dict["feats"], dtype=float)
        labels = np.loadtxt(utt_dict["labels"], dtype=int)
        # Keep only frames that have both a feature row and a label.
        usable = min(feats.shape[0], labels.shape[0])
        all_feats.extend(feats[:usable])
        all_labels.extend(labels[:usable])
        total_frames += usable

    # Truncate so the frame count divides evenly across the batch.
    total_frames = (total_frames // n_utts) * n_utts
    feats_mat = np.asarray(all_feats)[:total_frames]
    labels_mat = np.asarray(all_labels)[:total_frames]
    dim = feats_mat.shape[-1]

    feats_mat = feats_mat.reshape((n_utts, -1, dim))
    labels_mat = labels_mat.reshape((n_utts, -1))
    if feats_mat.shape[1] % 2 == 0:
        # Repeat the last label column so the ::2 subsampling keeps it.
        tail = labels_mat[:, -1].reshape((-1, 1))
        labels_mat = np.hstack((labels_mat, tail))
    labels_mat = labels_mat[:, ::2]
    # CNNs here expect channels (features) before time.
    feats_mat = feats_mat.transpose((0, 2, 1))

    return torch.Tensor(feats_mat), torch.Tensor(labels_mat)


def collate_fn_chunk(batch_data):
    """Collate a batch of chunk scp pairs into (feats, labels) tensors.

    Each element of batch_data is a dict whose "feats"/"labels" values are
    paths to kaldi-style scp text files; each scp line is
    "<utt_id> <ark_path:offset>".  All matrices are loaded with kaldiio,
    stacked along the frame axis, and reshaped to
    feats (batch, -1, feat_dim) and labels (batch, -1).
    """
    batch_size = len(batch_data)
    batch_feats, batch_labels = [], []
    acc_len = 0  # total frames accumulated so far
    for data_info in batch_data:
        with codecs.open(filename=data_info["feats"], mode='r') as f:
            feats_mat_list = [line.strip() for line in f.readlines()]
        with codecs.open(filename=data_info["labels"], mode='r') as f:
            labels_mat_list = [line.strip() for line in f.readlines()]

        for feat_line, label_line in zip(feats_mat_list, labels_mat_list):
            # Raw strings: "\s" in a plain literal is an invalid escape.
            key1, feat_dir = re.split(r"\s+", feat_line, maxsplit=1)
            key2, label_dir = re.split(r"\s+", label_line, maxsplit=1)
            assert key1 == key2, "feats/labels scp entries out of sync"
            feats = kaldiio.load_mat(feat_dir)
            labels = kaldiio.load_mat(label_dir)
            if acc_len == 0:
                batch_feats = feats
                batch_labels = labels
            else:
                batch_feats = np.vstack((batch_feats, feats))
                batch_labels = np.hstack((batch_labels, labels))
            acc_len += feats.shape[0]

    feat_dim = batch_feats.shape[-1]
    batch_feats = np.reshape(batch_feats, (batch_size, -1, feat_dim))
    batch_labels = np.reshape(batch_labels, (batch_size, -1))
    return torch.Tensor(batch_feats), torch.Tensor(batch_labels)


def make_context_indices(left_context, right_context, curr_index, max_len):
    """Build the frame-index window [curr-left, curr+right] around curr_index.

    Out-of-range positions are clamped: negative left-side indices map to 0,
    right-side indices past the end map to max_len - 1.  Any negative
    argument yields an empty list.
    """
    if min(left_context, right_context, curr_index, max_len) < 0:
        return []

    left = [max(i, 0) for i in range(curr_index - left_context, curr_index)]
    right = [min(i, max_len - 1)
             for i in range(curr_index + 1, curr_index + right_context + 1)]
    return left + [curr_index] + right


def splice_frames(ori_mat, left_context=-1, right_context=-1):
    """Splice each frame with its left/right context frames.

    :param ori_mat: 3-d numpy array (batch_size, time, feat_dim)
    :param left_context: frames of left context (>= 0), or -1 for none
    :param right_context: frames of right context (>= 0), or -1 for none
    :return: ori_mat unchanged when both contexts are -1, otherwise a new
             float64 array (batch_size, time, (left+right+1)*feat_dim) where
             window positions outside [0, time) are clamped to the edges
    """
    if left_context == -1 and right_context == -1:
        return ori_mat

    batch_size, max_len, feat_dim = ori_mat.shape
    ext_cols = (left_context + right_context + 1) * feat_dim
    # np.float was removed in NumPy 1.24; builtin float means float64 here.
    spliced_mat = np.zeros((batch_size, max_len, ext_cols), dtype=float)
    for i in range(max_len):
        # Window indices clamped to valid frames (edge frames repeat).
        idx = np.clip(np.arange(i - left_context, i + right_context + 1),
                      0, max_len - 1)
        # Vectorized over the batch dimension.
        spliced_mat[:, i, :] = ori_mat[:, idx, :].reshape(batch_size, ext_cols)
    return spliced_mat


def collate_fn_w2l_chunk(batch_data):
    """Collate chunk scp pairs for the wav2letter-style model.

    Identical loading path to collate_fn_chunk (kaldiio matrices stacked
    along the frame axis), but feats are reshaped to
    (batch, feat_dim, -1) and labels to (batch, -1).
    """
    batch_size = len(batch_data)
    batch_feats, batch_labels = [], []
    acc_len = 0  # total frames accumulated so far
    for data_info in batch_data:
        with codecs.open(filename=data_info["feats"], mode='r') as f:
            feats_mat_list = [line.strip() for line in f.readlines()]
        with codecs.open(filename=data_info["labels"], mode='r') as f:
            labels_mat_list = [line.strip() for line in f.readlines()]

        for feat_line, label_line in zip(feats_mat_list, labels_mat_list):
            # Raw strings: "\s" in a plain literal is an invalid escape.
            key1, feat_dir = re.split(r"\s+", feat_line, maxsplit=1)
            key2, label_dir = re.split(r"\s+", label_line, maxsplit=1)
            assert key1 == key2, "feats/labels scp entries out of sync"
            feats = kaldiio.load_mat(feat_dir)
            labels = kaldiio.load_mat(label_dir)
            if acc_len == 0:
                batch_feats = feats
                batch_labels = labels
            else:
                batch_feats = np.vstack((batch_feats, feats))
                batch_labels = np.hstack((batch_labels, labels))
            acc_len += feats.shape[0]

    # NOTE(review): reshaping the row-major (frames, feat_dim) stack straight
    # to (batch, feat_dim, -1) interleaves time and feature values; compare
    # collate_fn_cnn, which reshapes to (batch, -1, feat_dim) and then
    # transposes. Preserved as-is -- confirm intent before changing.
    feat_dim = batch_feats.shape[-1]
    batch_feats = np.reshape(batch_feats, (batch_size, feat_dim, -1))
    batch_labels = np.reshape(batch_labels, (batch_size, -1))
    return torch.Tensor(batch_feats), torch.Tensor(batch_labels)


def compute_dev_ratio(whole_data_set):
    """Heuristic dev-split ratio for train_test_split.

    Starts from roughly (1 + 1/N) / N with a 1% floor, then walks the
    implied dev-set size down by whole items until it is at most 8,
    and returns the resulting fraction of the data set.
    """
    total = len(whole_data_set)
    ratio = (1 + 1 / total) / total
    if ratio < 0.01:
        ratio = 0.01

    dev_size = total * ratio
    while dev_size > 8:
        dev_size -= 1
    return dev_size / total


def dev_ratio(whole_data_set, batch_size):
    """Dev-set fraction corresponding to exactly one batch of the data."""
    return batch_size / len(whole_data_set)


def compute_exp_lr_base(start_lr, final_lr, train_size, batch_size, epoch_times):
    """Per-step multiplicative base so lr decays from start_lr to final_lr.

    The base is 10 ** (log10(final/start) / steps) with
    steps = floor(train_size / batch_size) * epoch_times.  When the very
    first step would shrink the lr by no more than 1e-8, a fallback base
    derived from that minimum decay is returned instead.
    """
    eps = 1e-8
    steps = int(train_size / batch_size) * epoch_times  # total lr updates
    exponent = math.log(float(final_lr / start_lr), 10) / steps
    base = math.pow(10, exponent)
    if start_lr * (1 - base) > eps:
        return base
    # Per-step shrink would be negligible; use the floor-derived base.
    return (1 - eps / start_lr) * (1 - 2 / steps)






if __name__ == "__main__":
    # Smoke-test splice_frames: it requires a 3-d (batch, time, feat_dim)
    # input once a context is given (the old 2-d input crashed on shape[2]).
    x = np.random.randn(1, 5, 3)
    left_context, right_context = 0, 0
    y = splice_frames(x, left_context, right_context)
    print('x={}\ny={}\n'.format(x, y))
    decay = compute_exp_lr_base(0.0002, 0.00002, 763308, 64, 10)
    start_lr = 0.0002
    factor = 0.9999332307563308
    diff = start_lr - start_lr * factor
    print('eps : {}\n'.format(diff))
    # Two positional arguments are required (argv[1] and argv[2]).
    if len(sys.argv) < 3:
        print('{} {} {}\n'.format(sys.argv[0], "<feats>", "<labels>"))
        sys.exit(-1)
    feats_scp_list, labels_scp_list = sys.argv[1], sys.argv[2]
    data_set_list = data_prepare_by_name.from_list_get_data_list(feats_scp_list, labels_scp_list)
    train_set_list, dev_set_list = train_test_split(data_set_list, test_size=0.01)
    chunk_data = ChunkData(data_set_list)
    batch_size = 1
    for index, scp_pair in enumerate(chunk_data, 1):
        print('the {} times set scp_pair {}\n'.format(index, scp_pair))
        # collate_fn_chunk iterates a batch (list) of scp pairs, so wrap
        # the single pair in a one-element list.
        feats_tensor, labels_tensor = collate_fn_chunk([scp_pair])