import io
import json
import logging
import multiprocessing
import os
import tarfile
import time
import random

import torch
import torchaudio
from gxl_ai_utils.utils import utils_file
from tqdm import tqdm

# text_path_list = [
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/CSL/csl_final.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/pCLUE/pCLUE_train_all.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/news_commentary/news_commentary_all.json"
# ]
# text_path_list = [
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/rouzhiba_gpt4/ruozhiba_qa2449_gpt4t_final.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/stem_zh_instruct/all_final.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/stanford_alpaca-main/alpaca_gpt4_data_zh_final.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/BELLE/train_2M_CN_final.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/WebQA/train_final.json",
# ]
# text_path_list = [
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/dolly-15k-instruction-alpaca-format/dolly_final.jsonl",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/GPT4-LLM-Cleaned/alpaca_gpt4_data_unfiltered_final.json",
#     "/home/work_nfs11/zhguo/code/osum_tmp/QA/datasets/OpenHermesPreferences/out_final.jsonl"
# ]
# NOTE(review): text_path_list is not referenced anywhere in the visible code
# of this file — presumably kept (with the commented lists above) as a record
# of previously processed datasets; confirm before removing.
text_path_list = [
    "/home/work_nfs16/zyzhang_1/workspace/VITA-MLLM/AudioQA-1M/train.json"
]

def convert_dict_list_to_format_dict_list_for_t2t(dict_list):
    """Convert raw QA records into the fixed-schema T2T training format.

    Args:
        dict_list: list of dicts, each with keys 'key', 'q' (question)
            and 'a' (answer).

    Returns:
        A list of dicts with the T2T schema: task/key/lang/speaker/emotion/
        style/gender/age/caption/txt/extra.  'txt' carries the answer;
        'extra' carries the question plus a placeholder duration of -1
        (no audio for text-to-text data).  Records with an empty question
        or answer are skipped with a warning.
    """
    task = "<TEXT2TEXT>"
    lang = "<CN>"
    speaker = "<NONE>"
    emotion = "NEUTRAL"
    style = "<NONE>"
    gender = "<NONE>"
    age = "<NONE>"
    caption = "<NONE>"
    output_dict_list = []
    for dict_item in tqdm(dict_list,
                          desc='Converting dict list to format dict list for t2t',
                          total=len(dict_list)):
        key = dict_item['key']
        q = dict_item['q']
        a = dict_item['a']
        # Skip degenerate records: empty answer or empty question.
        if len(a) < 1 or len(q) < 1:
            logging.warning(f"Skip item with empty text or question: {dict_item}")
            continue
        output_dict_list.append({
            'task': task,
            'key': key,
            'lang': lang,
            'speaker': speaker,
            'emotion': emotion,
            'style': style,
            'gender': gender,
            'age': age,
            'caption': caption,
            'txt': a,
            'extra': {'duration': -1, 'question': q},
        })
    return output_dict_list



def write_tar_file(data_list, tar_file, resample=16000, index=0, total=1):
    """Pack one chunk of format dicts into a single tar shard.

    For every record, writes `<key>.txt` (target text), one `<key>.<field>`
    member per remaining metadata field, and `<key>.extra` with the 'extra'
    dict serialized as one JSON line.  When done, touches
    `<tar_file>.finished` as a completion marker; if that marker already
    exists the shard is skipped entirely (resume support).

    Args:
        data_list: list of format dicts (output of the t2t converter).
        tar_file: path of the tar shard to create.
        resample: unused here (no audio in t2t shards); kept for interface
            compatibility with audio shard writers.
        index, total: shard position, used for progress logging only.
    """
    print('Processing {} {}/{}'.format(tar_file, index, total))
    # read/save are placeholders (no audio decoding here) so the summary
    # line stays comparable with audio shard writers.
    read_time = 0.0
    save_time = 0.0
    write_time = 0.0
    if os.path.exists(tar_file + ".finished"):
        return

    with tarfile.open(tar_file, "w", encoding='utf8') as tar:
        for i, item in enumerate(tqdm(data_list,
                                      desc=f'Creating {os.path.basename(tar_file)}',
                                      unit='file', total=len(data_list))):
            wav = None  # predefine so the except message below is always valid
            try:
                wav = item.get("wav", None)
                key = str(item["key"])
                txt = str(item["txt"])

                # Target text member.
                txt_bytes = txt.encode('utf8')
                txt_info = tarfile.TarInfo(key + '.txt')
                txt_info.size = len(txt_bytes)
                tar.addfile(txt_info, io.BytesIO(txt_bytes))
                ts = time.time()
                # One member per metadata field (task, lang, speaker, ...).
                for field, value in item.items():
                    if field in ("wav", "extra", "txt", "key"):
                        continue  # handled separately
                    field_bytes = str(value).encode('utf8')
                    field_info = tarfile.TarInfo(f"{key}.{field}")
                    # BUG FIX: size must be the byte length of the encoded
                    # payload, not len(value) — len(value) raised TypeError
                    # for non-str values and under-counted multi-byte UTF-8
                    # text, corrupting the tar member.
                    field_info.size = len(field_bytes)
                    tar.addfile(field_info, io.BytesIO(field_bytes))
                # The 'extra' dict is stored as a single JSON line.
                jsonl_data = json.dumps(item["extra"],
                                        ensure_ascii=False).encode('utf8')
                jsonl_info = tarfile.TarInfo(key + '.extra')
                jsonl_info.size = len(jsonl_data)
                tar.addfile(jsonl_info, io.BytesIO(jsonl_data))

                write_time += (time.time() - ts)
            except Exception as e:
                # Best-effort: log and continue with the next record.
                print(f"Error processing file {wav}: {e}")

        print(f'read {read_time:.2f}s save {save_time:.2f}s write {write_time:.2f}s')
    # Touch the marker only after the tar has been fully written and closed.
    with open(tar_file + ".finished", 'wb'):
        pass

def make_shards(jsonl_file, shards_dir, num_utts_per_shard=1000, prefix='shards', resample=16000, num_threads=32):
    """Split a jsonl data list into tar shards, written by a process pool.

    Args:
        jsonl_file: input data list, one JSON record per line.
        shards_dir: output directory; receives the tar files plus
            'shards_list.txt' (one shard path per line) and 'data.list'
            (a copy of jsonl_file).
        num_utts_per_shard: records per shard.
        prefix: shard file name prefix.
        resample: forwarded to write_tar_file (unused for t2t data).
        num_threads: number of worker processes.

    Raises:
        json.JSONDecodeError: if any input line is not valid JSON.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')

    # Keep each worker process single-threaded to avoid oversubscription.
    torch.set_num_threads(1)

    data = []
    with open(jsonl_file, 'r', encoding='utf8') as fin:
        for line in fin:
            try:
                data.append(json.loads(line.strip()))
            except json.JSONDecodeError:
                logging.error(f"Error decoding JSON on line: {line.strip()}")
                raise

    print(f"Total records loaded: {len(data)}")

    num = num_utts_per_shard
    chunks = [data[i:i + num] for i in range(0, len(data), num)]
    os.makedirs(shards_dir, exist_ok=True)

    pool = multiprocessing.Pool(processes=num_threads)
    shards_list = []
    async_results = []
    num_chunks = len(chunks)
    for i, chunk in tqdm(enumerate(chunks), desc='Creating shards', total=num_chunks):
        tar_file = os.path.join(shards_dir, '{}_{:09d}.tar'.format(prefix, i))
        shards_list.append(tar_file)
        async_results.append(pool.apply_async(
            write_tar_file,
            (chunk, tar_file, resample, i, num_chunks)))

    pool.close()
    pool.join()
    # Surface worker exceptions: apply_async swallows them unless the
    # result is fetched, which previously hid failed/missing shards.
    for res in async_results:
        res.get()

    with open(os.path.join(shards_dir, 'shards_list.txt'), 'w', encoding='utf8') as fout:
        for name in shards_list:
            fout.write(name + '\n')
    data_list_path_new = os.path.join(shards_dir, 'data.list')
    utils_file.copy_file(jsonl_file, data_list_path_new)

def _main_get_data_list(input_path, output_file_path):
    """Load raw key/q/a records from a jsonl file, convert them to the
    T2T format and write the result as a jsonl data list.

    Args:
        input_path: jsonl file whose records contain 'key', 'q', 'a'.
        output_file_path: destination jsonl data list.
    """
    res_dict_list = utils_file.load_dict_list_from_jsonl(input_path)
    output_list = convert_dict_list_to_format_dict_list_for_t2t(res_dict_list)
    utils_file.write_dict_list_to_jsonl(output_list, output_file_path)


def _main_make_shards(input_data_list_path, output_shards_dir, num_threads=5):
    """Shard a prepared data list into tar files under output_shards_dir.

    Thin wrapper around make_shards with the fixed t2t settings
    (1000 utterances per shard, 't2t_osum' prefix, 16 kHz resample).
    """
    make_shards(
        input_data_list_path,
        output_shards_dir,
        num_utts_per_shard=1000,
        prefix='t2t_osum',
        resample=16000,
        num_threads=num_threads,
    )
    shards_list_path = os.path.join(output_shards_dir, 'shards_list.txt')
    print("finished!!!!", shards_list_path)

if __name__ == '__main__':
    # Each entry: (raw key/q/a jsonl, converted data.list, shard output dir).
    jobs = [
        ("/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/age_chat/age_sex_chat_row.jsonl",
         "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/age_chat/data.list",
         "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/age_chat/shard_dir"),
        ("/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/emotion_chat/emotion_chat_row.jsonl",
         "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/emotion_chat/data.list",
         "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/emotion_chat/shard_dir"),
        ("/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/sex_chat/age_sex_chat_row.jsonl",
         "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/sex_chat/data.list",
         "/home/work_nfs11/asr_data/data/osum_data/t2t_paralanguage_chat/sex_chat/shard_dir"),
    ]
    for input_path, output_path, output_shards_dir in jobs:
        _main_get_data_list(input_path, output_path)
        _main_make_shards(output_path, output_shards_dir)

