import os
import sys
import re
import codecs
import shutil
import kaldiio
import collections
import numpy as np
import multiprocessing
import threading
from queue import Queue
import psutil
import time
from data.data_load import make_context_indices, splice_frames


# Guards the residual-queue handoff at the end of list_spliter.
# NOTE(review): this is a threading.Lock, but list_spliter runs inside
# multiprocessing.Pool workers (separate processes), so each worker holds its
# own copy and no cross-process exclusion happens. The manager.Queue used for
# the handoff is already process-safe — confirm the lock can be dropped.
lock = threading.Lock()

def get_numpy_mat(ark_rspecifier):
    """Load one matrix from a Kaldi scp line.

    ``ark_rspecifier`` is a line of the form "<utt-id> <rxfilename>"; the
    matrix is read through kaldiio.

    Returns:
        (utt_id, matrix) — the utterance id and the loaded numpy matrix.
    """
    utt_id, rxfilename = re.split(r"\s+", ark_rspecifier, maxsplit=1)
    return utt_id, kaldiio.load_mat(rxfilename)


def get_aligned_feats_labels(feats_rspecifier, labels_rspecifier):
    """Load a feature matrix and its label vector, checking utt-id agreement.

    Returns:
        (utt_id, feats, labels) when both scp lines name the same utterance,
        otherwise None.
    """
    feats_key, feats_mat = get_numpy_mat(feats_rspecifier)
    labels_key, labels_mat = get_numpy_mat(labels_rspecifier)
    if feats_key != labels_key:
        return None
    return feats_key, feats_mat, labels_mat


def write_chunck_to_file(chunk_feats, chunk_labels, dst_dir, index, mats_per_ark, frames_per_mat):
    """Write one chunk of frames as paired feats/labels Kaldi ark+scp files.

    The chunk is cut into ``mats_per_ark`` matrices of ``frames_per_mat`` rows
    each; matrix keys are "<index>_<i>". Output files are
    dst_dir/feats_<index>.{ark,scp} and dst_dir/labels_<index>.{ark,scp}.

    Args:
        chunk_feats: 2-D numpy array with at least mats_per_ark*frames_per_mat rows.
        chunk_labels: 1-D numpy array aligned row-for-row with chunk_feats.
        dst_dir: existing output directory.
        index: chunk identifier (stringified into file names and keys).
        mats_per_ark: number of matrices to emit into the ark.
        frames_per_mat: rows per matrix.
    """
    suffix = str(index)
    feats_ark_name = "{}/feats_{}.ark".format(dst_dir, suffix)
    feats_scp_name = "{}/feats_{}.scp".format(dst_dir, suffix)
    label_ark_name = "{}/labels_{}.ark".format(dst_dir, suffix)
    label_scp_name = "{}/labels_{}.scp".format(dst_dir, suffix)

    feats_dict = collections.OrderedDict()
    labels_dict = collections.OrderedDict()
    for i in range(mats_per_ark):
        # NOTE: the original built keys with str(i).zfill(0), which pads to
        # width 0 and is therefore a no-op; plain str(i) yields identical
        # keys. If fixed-width keys were intended, a positive width is needed.
        sub_index = "{}_{}".format(suffix, i)
        start, stop = i * frames_per_mat, (i + 1) * frames_per_mat
        feats_dict[sub_index] = chunk_feats[start:stop, :]
        labels_dict[sub_index] = chunk_labels[start:stop]

    kaldiio.save_ark(feats_ark_name, feats_dict, scp=feats_scp_name)
    kaldiio.save_ark(label_ark_name, labels_dict, scp=label_scp_name)


def list_spliter(thread_id, feats_ark_part_list, labels_ark_part_list,
                 mats_per_ark, frames_per_mat, feats_dim, left_context, right_context,
                 sum_num_frames, residual_q, dst_dir=None):
    """Repack one worker's share of utterances into fixed-size frame chunks.

    Frames from consecutive utterances are concatenated into a rolling buffer
    of ``mats_per_ark * frames_per_mat`` frames; each filled buffer is flushed
    via write_chunck_to_file. Whatever remains in the buffer afterwards is
    handed back to the parent through ``residual_q``.

    Args:
        thread_id: worker index, used only in file/key names and log lines.
        feats_ark_part_list: scp lines ("<utt> <rxfilename>") for features.
        labels_ark_part_list: scp lines for labels, aligned with the above.
        mats_per_ark: matrices per output ark.
        frames_per_mat: frames per matrix.
        feats_dim: spliced feature dimensionality (buffer column count).
        left_context: splice left context passed to splice_frames.
        right_context: splice right context passed to splice_frames.
        sum_num_frames: total frame count of this worker's utterances.
        residual_q: manager queue receiving the leftover {"feats", "labels"}.
        dst_dir: output directory; when None, falls back to the module-level
            ``dst_dir`` global for backward compatibility (fork-only).
    """
    if dst_dir is None:
        # The original read the module-level dst_dir assigned in __main__,
        # which only exists in fork-started children; prefer passing it in.
        dst_dir = globals()["dst_dir"]

    frames_per_ark = int(mats_per_ark * frames_per_mat)
    assert len(labels_ark_part_list) == len(feats_ark_part_list)
    assert frames_per_ark > 0

    num_ark = sum_num_frames // frames_per_ark
    print("[LOG] Thread {}; feats_scp_size {}; labels_scp_size {}, num_ark {}, sum_num_frames {}".format(
        thread_id, len(feats_ark_part_list), len(labels_ark_part_list), num_ark, sum_num_frames))

    # If this worker cannot fill even one ark, size the buffer to the exact
    # residual so everything goes back through residual_q.
    if num_ark != 0:
        common_frames = frames_per_ark
    else:
        common_frames = sum_num_frames % frames_per_ark

    batch_feats = np.empty((common_frames, feats_dim))
    batch_labels = np.empty(common_frames)
    acc_len, index, start_time = 0, 0, time.time()

    for i, (feats_line, labels_line) in enumerate(zip(feats_ark_part_list, labels_ark_part_list)):
        key, numpy_feats, numpy_labels = get_aligned_feats_labels(feats_line, labels_line)
        # Feats and labels for one utterance can differ slightly in length;
        # truncate both to the shorter before splicing.
        shrinked_size = min(numpy_labels.shape[0], numpy_feats.shape[0])
        numpy_feats = splice_frames(numpy_feats[:shrinked_size, :], left_context, right_context)
        numpy_labels = numpy_labels[:shrinked_size]

        end_line = min(acc_len + shrinked_size, common_frames)
        batch_feats[acc_len:end_line, :] = numpy_feats[:end_line - acc_len, :]
        batch_labels[acc_len:end_line] = numpy_labels[:end_line - acc_len]
        if acc_len + shrinked_size > common_frames:
            # Buffer full: flush it and carry the overflow to the front of
            # the next buffer.
            std_filesuffix = str(thread_id) + "_" + str(index)
            write_chunck_to_file(batch_feats[:end_line, :], batch_labels[:end_line],
                                 dst_dir=dst_dir, index=std_filesuffix,
                                 mats_per_ark=mats_per_ark, frames_per_mat=frames_per_mat)
            acc_len = acc_len + shrinked_size - common_frames
            # NOTE(review): if a single utterance overflows by more than one
            # full buffer this assignment raises a shape mismatch — assumed
            # not to happen for sane chunk sizes; confirm upstream.
            batch_feats[:acc_len, :] = numpy_feats[-acc_len:, :]
            batch_labels[:acc_len] = numpy_labels[-acc_len:]
            index += 1
        else:
            acc_len = end_line

        if i % 100 == 0 and i != 0:
            print('\t Thread {}; file_number {}; time {}\n'.format(thread_id, index, time.time() - start_time), end="\r")

    # Hand back only the frames actually accumulated. The original enqueued
    # the whole preallocated buffer (including the uninitialized np.empty
    # tail) and gated on the precomputed residual count, which leaked garbage
    # frames into the parent's residual pass.
    if acc_len > 0:
        with lock:
            residual_data_dict = collections.OrderedDict()
            residual_data_dict["feats"] = batch_feats[:acc_len].copy()
            residual_data_dict["labels"] = batch_labels[:acc_len].copy()
            residual_q.put(residual_data_dict)


def _read_utt2num_frames(utt2num_frames_file):
    """Parse a Kaldi utt2num_frames file into {utt_id: frame count}."""
    utt2num_frames_dict = {}
    with open(utt2num_frames_file, "r") as f:
        for line in f:
            utt_id, num_frames = re.split(r"\s+", line.strip(), maxsplit=1)
            # int(), not eval(): the field is a plain integer and eval on file
            # contents would execute arbitrary expressions.
            utt2num_frames_dict[utt_id] = int(num_frames)
    return utt2num_frames_dict


def _pack_residuals(remain_q, dst_dir, feat_dim, frames_per_ark,
                    mats_per_ark, frames_per_mat, writer_id):
    """Concatenate the workers' leftover frames from ``remain_q`` and flush
    every full ark-sized chunk; a final partial chunk is dropped (matching
    the original behaviour, which never wrote the tail)."""
    batch_feats = np.empty((frames_per_ark, feat_dim))
    batch_labels = np.empty(frames_per_ark)
    acc_len, index = 0, 0
    while not remain_q.empty():
        this_pair = remain_q.get()
        numpy_feats = this_pair["feats"]
        numpy_labels = this_pair["labels"]
        # NOTE(review): the original called splice_frames() here and threw
        # the result away (a no-op); residual features were already spliced
        # inside the workers, so the call is removed.
        shrinked_size = min(numpy_feats.shape[0], numpy_labels.shape[0])
        end_line = min(acc_len + shrinked_size, frames_per_ark)

        batch_feats[acc_len:end_line, :] = numpy_feats[:end_line - acc_len, :]
        batch_labels[acc_len:end_line] = numpy_labels[:end_line - acc_len]
        if acc_len + shrinked_size > frames_per_ark:
            std_filesuffix = str(writer_id) + "_" + str(index)
            write_chunck_to_file(batch_feats[:end_line, :], batch_labels[:end_line],
                                 dst_dir=dst_dir, index=std_filesuffix,
                                 mats_per_ark=mats_per_ark, frames_per_mat=frames_per_mat)
            acc_len = acc_len + shrinked_size - frames_per_ark
            batch_feats[:acc_len, :] = numpy_feats[-acc_len:, :]
            batch_labels[:acc_len] = numpy_labels[-acc_len:]
            index += 1
        else:
            acc_len = end_line


def parallel_main(src_dir, dst_dir,
                  org_feat_dim=80,
                  left_context=2,
                  right_context=2,
                  mats_per_ark=1024, frames_per_mat=128):
    """Repack a Kaldi data dir into fixed-size spliced ark chunks, in parallel.

    Reads feats.scp / text / utt2num_frames from ``src_dir``, splits the
    utterance list across one worker per CPU, and has each worker write
    ark+scp chunk files into ``dst_dir``. Leftover frames from all workers
    are merged into additional chunks, then two index files
    (final_feats_scps.list / final_labels_scps.list) are written listing
    every generated .scp.

    Args:
        src_dir: Kaldi data directory (feats.scp and text must be aligned).
        dst_dir: output directory; created if missing.
        org_feat_dim: raw feature dimension before splicing.
        left_context: splice left context.
        right_context: splice right context.
        mats_per_ark: matrices per output ark.
        frames_per_mat: frames per matrix.

    Raises:
        FileNotFoundError: when feats.scp or text is missing in src_dir.
    """
    feats_scp = src_dir + "//feats.scp"
    alis_scp = src_dir + "//text"
    utt2num_frames_file = src_dir + "//utt2num_frames"

    frames_per_ark = frames_per_mat * mats_per_ark
    print('MatsPerArk {};\t FramesPerMat {};\t FramesPerArk {}\n'.format(mats_per_ark, frames_per_mat, frames_per_ark))

    if not os.path.exists(alis_scp) or not os.path.exists(feats_scp):
        raise FileNotFoundError
    if not os.path.exists(dst_dir):
        os.mkdir(dst_dir)

    print('[ASSERT] {} should be fixed \n'.format(src_dir))
    # Context managers close the scp files (the original leaked the handles).
    with codecs.open(filename=feats_scp, mode='r') as f:
        feats_list = [line.strip() for line in f]
    with codecs.open(filename=alis_scp, mode='r') as f:
        alis_list = [line.strip() for line in f]
    print('All feats_size {}, labels_size {}'.format(len(feats_list), len(alis_list)))
    assert len(feats_list) == len(alis_list)

    utt2num_frames_dict = _read_utt2num_frames(utt2num_frames_file)

    cpu_count = psutil.cpu_count()
    samples_per_thread = int(len(feats_list) / cpu_count) + 1
    print('NumThreads {}; SamplesPerThread {}\n'.format(cpu_count, samples_per_thread))

    # Dimensionality after splicing left/right context onto each frame.
    feat_dim = org_feat_dim * (left_context + right_context + 1)

    with multiprocessing.Manager() as manager:
        remain_q = manager.Queue()
        pool = multiprocessing.Pool(cpu_count)
        pending = []
        for i in range(cpu_count):
            part_feats_list, part_labels_list, part_sum_num_frames = [], [], 0
            for j in range(samples_per_thread * i, min((i + 1) * samples_per_thread, len(feats_list))):
                cur_feat_uttid = re.split(r"\s+", feats_list[j], maxsplit=1)[0]
                cur_label_uttid = re.split(r"\s+", alis_list[j], maxsplit=1)[0]
                # Keep only utterances whose feats/labels ids agree and whose
                # length is known; everything else is skipped with a log line.
                if cur_feat_uttid == cur_label_uttid and cur_feat_uttid in utt2num_frames_dict:
                    part_feats_list.append(feats_list[j])
                    part_labels_list.append(alis_list[j])
                    part_sum_num_frames += utt2num_frames_dict[cur_label_uttid]
                else:
                    print(i, "skip")
            pending.append(pool.apply_async(list_spliter, args=(i + 1, part_feats_list, part_labels_list,
                                                                mats_per_ark, frames_per_mat, feat_dim,
                                                                left_context,
                                                                right_context,
                                                                part_sum_num_frames,
                                                                remain_q)))

        pool.close()
        pool.join()
        # Surface any worker exception: the original fire-and-forget
        # apply_async silently discarded failures.
        for result in pending:
            result.get()

        # Merge the leftover frames handed back by the workers.
        print('[LOG] Thread {}; excess Queue Length {}\n'.format(cpu_count + 1, remain_q.qsize()))
        _pack_residuals(remain_q, dst_dir, feat_dim, frames_per_ark,
                        mats_per_ark, frames_per_mat, writer_id=cpu_count + 1)

    # Build the top-level index files listing every generated .scp.
    feats_scp_list, labels_scp_list = [], []
    f_list = os.listdir(dst_dir)
    print('dst_dir: num_file {}\n'.format(len(f_list)))
    for file in f_list:
        if re.match(r"^feats_\d{1,}_\d{1,}\.scp$", file.strip()):
            feats_scp_list.append(file)
        if re.match(r"^labels_\d{1,}_\d{1,}\.scp$", file.strip()):
            labels_scp_list.append(file)

    print('dst_dir: num_feats_file {}; num_labels_file {}'.format(len(feats_scp_list), len(labels_scp_list)))
    with codecs.open(filename=dst_dir + "//final_feats_scps.list", mode='w') as f:
        for file in sorted(feats_scp_list):
            f.write('{}//{}\n'.format(dst_dir, file))

    with codecs.open(filename=dst_dir + "//final_labels_scps.list", mode='w') as f:
        for file in sorted(labels_scp_list):
            f.write('{}//{}\n'.format(dst_dir, file))

    print('All things are done!')


if __name__ == '__main__':
    # Usage: <script> src_dir dst_dir left_context right_context
    #                 number_mats_per_ark number_frames_per_mat
    if len(sys.argv) < 7:
        print('{} {} {} {} {} {} {}\n'.format(sys.argv[0], "src_dir", "dst_dir", "left_context", "right_context", "number_mats_per_ark", "number_frames_per_mat"))
        sys.exit(-1)

    # Module-level names on purpose: worker processes may read dst_dir as a
    # global under the fork start method.
    src_dir, dst_dir, left_c, right_c, number_mats_per_ark, number_frames_per_mat = sys.argv[1:7]

    print('Cmd : {}\n'.format(sys.argv[1:]))
    start_time = time.time()
    parallel_main(src_dir=src_dir, dst_dir=dst_dir,
                  org_feat_dim=80,
                  left_context=int(left_c),
                  right_context=int(right_c),
                  mats_per_ark=int(number_mats_per_ark),
                  frames_per_mat=int(number_frames_per_mat))
    print("All things done.\n")
    print(time.time() - start_time)
    print("---------------------------------")
