import os
import sys
import re
import codecs
import shutil
import kaldiio
import collections
import numpy as np
import multiprocessing
import threading
from queue import Queue
import psutil
from tqdm import trange, tqdm


def make_context_indices(left_context, right_context, curr_index, max_len):
    """Build the row indices for a context window around ``curr_index``.

    Produces ``left_context`` indices before the frame (clamped at 0),
    the frame itself, then ``right_context`` indices after it (clamped
    at ``max_len - 1``).  Returns an empty list if any argument is
    negative.
    """
    if min(left_context, right_context, curr_index, max_len) < 0:
        return []
    # Left side: edge frames repeat index 0.
    left = [max(i, 0) for i in range(curr_index - left_context, curr_index)]
    # Right side: edge frames repeat the last valid index.
    right = [min(i, max_len - 1)
             for i in range(curr_index + 1, curr_index + right_context + 1)]
    return left + [curr_index] + right


def splice_frames(ori_mat, left_context=-1, right_context=-1):
    """Splice each frame of ``ori_mat`` with its context frames.

    Row ``i`` of the result is the horizontal concatenation of rows
    ``i - left_context .. i + right_context`` of ``ori_mat``, with
    out-of-range indices clamped to the first/last frame.

    Args:
        ori_mat: 2-D array of shape (num_frames, feat_dim).
        left_context: frames of left context; -1 together with
            right_context == -1 means "no splicing".
        right_context: frames of right context.

    Returns:
        ``ori_mat`` unchanged when both contexts are -1, otherwise a new
        float64 array of shape
        (num_frames, (left_context + right_context + 1) * feat_dim).
    """
    if left_context == -1 and right_context == -1:
        return ori_mat
    max_len = ori_mat.shape[0]
    ext_cols = (left_context + right_context + 1) * ori_mat.shape[1]
    # Vectorized context gather: one (num_frames, window) index matrix,
    # clamped to valid rows, replaces the per-frame Python loop.
    offsets = np.arange(-left_context, right_context + 1)
    indices = np.clip(np.arange(max_len)[:, None] + offsets[None, :],
                      0, max_len - 1)
    # BUGFIX: the original used dtype=np.float, an alias removed in
    # NumPy 1.24; float64 is the exact equivalent.
    return ori_mat[indices].reshape(max_len, ext_cols).astype(np.float64)


def get_numpy_mat(ark_rspecifier):
    """Parse one scp line ("<key> <rspecifier>") and load its matrix.

    Returns a ``(key, matrix)`` pair, where the matrix is loaded by
    kaldiio from the rspecifier part of the line.
    """
    utt_key, rspecifier = re.split(r"\s+", ark_rspecifier, maxsplit=1)
    return utt_key, kaldiio.load_mat(rspecifier)


def get_aligned_feats_labels(feats_rspecifier, labels_rspecifier):
    """Load a paired (features, labels) matrix from two scp lines.

    Args:
        feats_rspecifier: "<key> <rspecifier>" line for the features.
        labels_rspecifier: "<key> <rspecifier>" line for the labels.

    Returns:
        ``(key, feats, labels)`` when both lines carry the same key,
        otherwise ``(None, None, None)``.
    """
    feats_key, numpy_feats = get_numpy_mat(feats_rspecifier)
    labels_key, numpy_labels = get_numpy_mat(labels_rspecifier)
    if labels_key != feats_key:
        # BUGFIX: was `return None`, which made the callers'
        # `key, feats, labels = get_aligned_feats_labels(...)` raise
        # TypeError before their `if key is None` guard could run.
        return None, None, None
    return feats_key, numpy_feats, numpy_labels


def write_chunck_to_file(chunk_feats, chunk_labels, dst_dir, index, mats_per_ark, frames_per_mat):
    """Split one chunk of frames into fixed-size matrices and dump them.

    Writes paired ``feats_<index>.ark/.scp`` and ``labels_<index>.ark/.scp``
    files under ``dst_dir``; each ark holds ``mats_per_ark`` matrices of
    ``frames_per_mat`` frames, keyed ``<index>_<i>``.
    """
    suffix = str(index)
    feats_ark = dst_dir + "/feats_" + suffix + ".ark"
    feats_scp = dst_dir + "/feats_" + suffix + ".scp"
    labels_ark = dst_dir + "/labels_" + suffix + ".ark"
    labels_scp = dst_dir + "/labels_" + suffix + ".scp"

    feats_out = collections.OrderedDict()
    labels_out = collections.OrderedDict()
    for mat_i in range(mats_per_ark):
        # NOTE: the original built keys with str(i).zfill(0); zfill(0)
        # never pads, so the key is just str(i) — reproduced here.
        key = suffix + '_' + str(mat_i)
        lo = mat_i * frames_per_mat
        hi = lo + frames_per_mat
        feats_out[key] = chunk_feats[lo:hi, :]
        labels_out[key] = chunk_labels[lo:hi]

    kaldiio.save_ark(feats_ark, feats_out, scp=feats_scp)
    kaldiio.save_ark(labels_ark, labels_out, scp=labels_scp)
    feats_out.clear()
    labels_out.clear()


def list_spliter(thread_id, feats_ark_part_list, labels_ark_part_list,
                 mats_per_ark, frames_per_mat, feats_dim, left_context, right_context, residual_q):
    """Worker thread: stream feats/labels pairs, splice context, and write
    fixed-size ark files; leftover frames (< one ark) go onto ``residual_q``.

    NOTE(review): ``feats_dim`` is accepted but never used in this body.
    NOTE(review): the write below uses ``dst_dir`` as a module-level
    global (bound only inside the __main__ guard) — this will raise
    NameError if the function is called from an importing module; it
    should be passed in as a parameter.
    """
    frames_per_ark = int(mats_per_ark * frames_per_mat)
    assert len(labels_ark_part_list) == len(feats_ark_part_list)
    assert frames_per_ark > 0
    print('+++++++++++Start to work in thread {}, {} frames in ark\n'.format(
        thread_id, frames_per_ark))
    ori_scp_size = len(feats_ark_part_list)
    print('-----------Thread {} feats scp size {}, labels scp size {}'.format(thread_id, ori_scp_size,ori_scp_size))
    # NOTE(review): labels_scps / feats_scps are never used below.
    labels_scps, feats_scps = [], []
    # batch_feats/batch_labels start as plain lists and become numpy
    # arrays on the first assignment; acc_len tracks accumulated frames.
    batch_feats, batch_labels, acc_len = [], [], 0
    index = 0
    pbar = tqdm(range(ori_scp_size))
    for item in zip(feats_ark_part_list, labels_ark_part_list):
        pbar.update(1)
        # NOTE(review): get_aligned_feats_labels returns a bare None on a
        # key mismatch, so this 3-way unpack would raise TypeError before
        # the `if key is None` guard below could fire — TODO confirm/fix.
        key, numpy_feats, numpy_labels = get_aligned_feats_labels(item[0], item[1])
        if key is None:
            continue
        # Truncate both streams to their common length before splicing.
        shrinked_size = min(numpy_labels.shape[0], numpy_feats.shape[0])
        numpy_feats = numpy_feats[:shrinked_size, :]
        numpy_feats = splice_frames(numpy_feats, left_context, right_context)
        numpy_labels = numpy_labels[:shrinked_size]
        # Flush as many full arks as the accumulator currently holds.
        while acc_len >= frames_per_ark:
            residual = acc_len - frames_per_ark
            # dump this ark and scp
            std_filesuffix = str(thread_id) + "_" + str(index).zfill(0)
            write_chunck_to_file(batch_feats[:frames_per_ark, :], batch_labels[:frames_per_ark],
                                 dst_dir=dst_dir, index=std_filesuffix,
                                 mats_per_ark=mats_per_ark, frames_per_mat=frames_per_mat)
            batch_feats = batch_feats[frames_per_ark:, :]
            batch_labels = batch_labels[frames_per_ark:]
            acc_len = residual
            #print('Remain_size {}, written {} times\n'.format(acc_len, index))
            index += 1

        if len(batch_feats) == 0 and len(batch_labels) == 0:
            # First utterance (or everything flushed): adopt the arrays.
            batch_feats = numpy_feats
            batch_labels = numpy_labels
            acc_len = shrinked_size
        else:
            # Append the new utterance to the running accumulator.
            temp_feats = np.vstack((batch_feats, numpy_feats))
            temp_labels = np.hstack((batch_labels, numpy_labels))
            batch_feats = temp_feats
            batch_labels = temp_labels
            acc_len += shrinked_size
            #print('cumulate size {} in thread {}\n'.format(acc_len, thread_id))

        if index % 1000 == 0 and index != 0:
            print('{} files are processed in thread {}\n'.format(index, thread_id))

    if acc_len < frames_per_ark:
        print('warnings: {} frames is not written in worker thread {}\n'.format(acc_len, thread_id))
    # Hand any leftover (already spliced) frames to the main thread.
    residual_data_dict = collections.OrderedDict()
    if acc_len > 0:
        residual_data_dict["feats"] = batch_feats
        residual_data_dict["labels"] = batch_labels
        residual_q.put(residual_data_dict)


def parallel_main(src_dir, dst_dir,
                  org_feat_dim=80,
                  left_context=2,
                  right_context=2,
                  mats_per_ark=1024, frames_per_mat=128):
    """Drive the whole conversion: fan the feats/labels scp lists out to
    one worker thread per CPU, collect each worker's residual frames,
    flush them as additional arks, then write list files of all scps.

    NOTE(review): despite cpu_count, these are *threads* — kaldiio I/O
    releases the GIL only partially, so speedup may be limited.
    """
    feats_scp = src_dir + "//feats.scp"
    alis_scp = src_dir + "//text"
    print('++++++++++++++mats per ark {}++++++++++++++frames per mat {}\n'.format(mats_per_ark, frames_per_mat))
    frames_per_ark = frames_per_mat * mats_per_ark
    print('+++++++++++++++frames per ark {}\n'.format(frames_per_ark))

    if not os.path.exists(alis_scp) or not os.path.exists(feats_scp):
        raise FileNotFoundError
    if not os.path.exists(dst_dir):
        os.mkdir(dst_dir)

    print('Warnings:{} should be fixed \n'.format(src_dir))
    cpu_count = psutil.cpu_count()

    # Read both scp files fully into memory; they must be line-aligned.
    feats_list = [line.strip() for line in codecs.open(filename=feats_scp, mode='r').readlines()]
    alis_list = [line.strip() for line in codecs.open(filename=alis_scp, mode='r').readlines()]
    print('==========All feats size {}, labels size {}'.format(len(feats_list), len(alis_list)))
    assert len(feats_list) == len(alis_list)
    samples_per_thread = len(feats_list) / cpu_count
    samples_per_thread = int(samples_per_thread) + 1
    print('Use {} threads and {} samples every thread \n'.format(cpu_count, samples_per_thread))
    remain_q = Queue()
    #def list_spliter(thread_id, feats_ark_part_list,
    # labels_ark_part_list, batch_size, feats_dim, residual_q):
    workers = []
    feat_dim = org_feat_dim * (left_context + right_context + 1)
    for i in range(cpu_count):
        part_feats_list, part_labels_list = [], []
        # NOTE(review): both branches yield the same slice when the end
        # lands exactly on len(feats_list); the condition looks vestigial.
        if (i+1)*samples_per_thread == len(feats_list):
            part_feats_list = feats_list[samples_per_thread * i:]
            part_labels_list = alis_list[samples_per_thread * i:]
        else:
            part_feats_list = feats_list[samples_per_thread*i:(i+1)*samples_per_thread]
            part_labels_list = alis_list[samples_per_thread*i:(i+1)*samples_per_thread]
        t = threading.Thread(target=list_spliter, args=(i+1, part_feats_list, part_labels_list,
                                                        mats_per_ark, frames_per_mat, feat_dim,
                                                        left_context,
                                                        right_context,
                                                        remain_q))
        workers.append(t)
        t.start()

    for i in range(cpu_count):
        workers[i].join()

    # NOTE(review): these two arrays are only ever printed at the end;
    # they look like leftovers from an earlier implementation.
    last_feats_mat, last_labels_mat = np.zeros((frames_per_ark, feat_dim), dtype=np.float32), \
                                      np.zeros((1, frames_per_ark), dtype=np.float32)
    print('!!!!!  Res fractions {}\n'.format(remain_q.qsize()))
    acc_len = 0
    index = 0
    batch_feats, batch_labels = [], []
    # Merge the per-thread residuals and flush full arks under thread id 0.
    while not remain_q.empty():
        this_pair = remain_q.get()
        numpy_feats = this_pair["feats"]
        # NOTE(review): the return value is discarded. The residual feats
        # were already spliced inside the workers, so assigning it here
        # would double-splice; the call itself is just wasted work — TODO
        # confirm and delete.
        splice_frames(numpy_feats, left_context=left_context, right_context=right_context)
        numpy_labels = this_pair["labels"]
        shrinked_size = min(numpy_feats.shape[0], numpy_labels.shape[0])
        while acc_len >= frames_per_ark:
            residual = acc_len - frames_per_ark
            print('++++++++++++acc len {}, residual {}+++++++++++\n'.format(acc_len, residual))
            # dump this ark and scp
            std_filesuffix = str(0) + "_" + str(index).zfill(0)
            write_chunck_to_file(batch_feats[:frames_per_ark, :], batch_labels[:frames_per_ark],
                                 dst_dir=dst_dir, index=std_filesuffix,
                                 mats_per_ark=mats_per_ark, frames_per_mat=frames_per_mat)
            index += 1
            batch_feats = batch_feats[frames_per_ark:, :]
            batch_labels = batch_labels[frames_per_ark:]
            acc_len = residual

        if len(batch_feats) == 0 and len(batch_labels) == 0:
            batch_feats = numpy_feats
            batch_labels = numpy_labels
            acc_len = shrinked_size
        else:
            temp_feats = np.vstack((batch_feats, numpy_feats))
            temp_labels = np.hstack((batch_labels, numpy_labels))
            batch_feats = temp_feats
            batch_labels = temp_labels
            acc_len += shrinked_size
    # NOTE(review): any frames still accumulated after the queue drains
    # (including >= frames_per_ark worth added by the final item) are
    # silently dropped — the flush loop never runs again. TODO confirm
    # whether a final flush is intended.

    # Index every per-chunk scp written under dst_dir into two list files.
    feats_scp_list, labels_scp_list = [], []
    f_list = os.listdir(dst_dir)
    print('size of dst_dir {}\n'.format(len(f_list)))
    for file in f_list:
        if re.match(r"^feats_\d{1,}_\d{1,}\.scp$", file.strip()):
            feats_scp_list.append(file)

        if re.match(r"^labels_\d{1,}_\d{1,}\.scp$", file.strip()):
            labels_scp_list.append(file)

    print('size of feats and labels {} {}'.format(len(feats_scp_list), len(labels_scp_list)))
    with codecs.open(filename=dst_dir+"//final_feats_scps.list", mode='w') as f:
        for file in sorted(feats_scp_list):
            f.write('{}//{}\n'.format(dst_dir, file))

    with codecs.open(filename=dst_dir+"//final_labels_scps.list", mode='w') as f:
        for file in sorted(labels_scp_list):
            f.write('{}//{}\n'.format(dst_dir, file))


    print('last feats and labels {} {}'.format(last_feats_mat.shape, last_labels_mat.shape))
    print('All things are done!')


if __name__ == '__main__':
    # The script consumes sys.argv[1..6] below, so 7 entries (including
    # the program name) are required.  BUGFIX: the original checked
    # `< 4`, so invocations with 4-6 arguments slipped past the usage
    # message and crashed with IndexError at the unpacking.
    if len(sys.argv) < 7:
        print('{} {} {} {} {} {} {}\n'.format(sys.argv[0], "src_dir", "dst_dir", "left_context", "right_context", "number_mats_per_ark", "number_frames_per_mat"))
        sys.exit(-1)
    # dst_dir must stay a module-level global: list_spliter reads it.
    src_dir, dst_dir, left_c, right_c, number_mats_per_ark, number_frames_per_mat = sys.argv[1], sys.argv[2], \
                                                                   sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6]
    parallel_main(src_dir=src_dir, dst_dir=dst_dir,
                  org_feat_dim=80,
                  left_context=int(left_c),
                  right_context=int(right_c),
                  mats_per_ark=int(number_mats_per_ark),
                  frames_per_mat=int(number_frames_per_mat))
    print("All things done.\n")

