import json
from tqdm import *

def data_process():
    """Convert the two single-turn JSON datasets into one JSONL training file.

    Reads datasets/single_turn_dataset_{1,2}.json, keeps only records that
    have both "prompt" and "completion" fields, assigns conversation ids
    that run sequentially across both source files, and writes one JSON
    object per line to datasets_output/single_train.jsonl.
    """
    data_paths = [
        'datasets/single_turn_dataset_1.json',
        'datasets/single_turn_dataset_2.json',
    ]
    output_path = 'datasets_output/single_train.jsonl'

    all_results = []
    conversation_id = 0
    for data_path in data_paths:
        with open(data_path, 'r', encoding='utf-8') as file:
            data_json = json.load(file)
        for data in data_json:
            # Skip records missing either required field.
            try:
                human_text = data["prompt"]
                assistant_text = data["completion"]
            except KeyError:
                continue
            # ids continue across both source files (no gaps for skipped rows)
            conversation_id += 1
            all_results.append({
                "conversation_id": conversation_id,
                # single-turn data: exactly one exchange per conversation
                "conversation": [{"human": human_text, "assistant": assistant_text}],
            })

    # Write exactly once with 'w'. The original wrote dataset 1's results,
    # then re-opened the file with 'w' (truncating them) and appended the
    # whole accumulated list again — same final file, double the work.
    with open(output_path, 'w', encoding='utf-8') as file:
        for item in tqdm(all_results, desc="Writing to File"):
            file.write(json.dumps(item, ensure_ascii=False) + '\n')

    # Report the number actually written, not the raw record count.
    print(f"将 {len(all_results)} 条数据保存到{output_path}中")

def data_process_mutil():
    """Convert the two multi-turn JSON datasets into one JSONL training file.

    Each source record maps a top-level key to a list of turn dicts; inside
    a turn, the 'input' sub-key is the human side and any other sub-key is
    taken as the assistant side. Malformed records (KeyError while walking
    the structure) are skipped. Output goes to
    datasets_output/multi_train.jsonl, one JSON object per line.
    """
    data_paths = [
        'datasets/multi_turn_dataset_1.json',
        'datasets/multi_turn_dataset_2.json',
    ]
    output_path = 'datasets_output/multi_train.jsonl'

    all_results = []
    # The original never initialized this before `conversation_id + 1` in the
    # second pass — UnboundLocalError if the first dataset was empty.
    conversation_id = 0
    for data_path in data_paths:
        with open(data_path, 'r', encoding='utf-8') as file:
            data_json = json.load(file)
        for data in data_json:
            conversation_id += 1
            conversation = []
            try:
                for key in data:
                    for turn in data[key]:
                        for subkey in turn:
                            if subkey == 'input':
                                human_text = turn[subkey]
                            else:
                                assistant_text = turn[subkey]
                        # NOTE(review): if a turn lacks an 'input' (or output)
                        # sub-key, the value carries over from the previous
                        # turn — preserved from the original; confirm intended.
                        conversation.append(
                            {"human": human_text, "assistant": assistant_text}
                        )
            except KeyError:
                # 结构不完整的数据，跳过 (skip structurally incomplete records)
                continue
            all_results.append(
                {"conversation_id": conversation_id, "conversation": conversation}
            )

    # Write exactly once with 'w'. The original wrote dataset 1's results,
    # then truncated the file with a second 'w' open and re-appended the
    # whole accumulated list — same final file, double the work.
    with open(output_path, 'w', encoding='utf-8') as file:
        for item in tqdm(all_results, desc="Writing to File"):
            file.write(json.dumps(item, ensure_ascii=False) + '\n')

    # Report the number actually written, not the raw record count.
    print(f"将 {len(all_results)} 条数据保存到{output_path}中")

def process_data_tokenizer(data: dict, tokenizer, max_seq_length) -> dict:
    """Tokenize one conversation record into supervised-fine-tuning inputs.

    Args:
        data: dict with a "conversation" list of
            {"human": str, "assistant": str} turns.
        tokenizer: HF-style tokenizer; called with add_special_tokens=False
            and must expose `eos_token_id`.
        max_seq_length: hard cap on the resulting sequence length.

    Returns:
        dict with "input_ids", "attention_mask" and "labels" (plain lists).
        Prompt positions are set to -100 in labels so loss is computed only
        on assistant tokens and the trailing EOS.
    """
    input_ids, attention_mask, labels = [], [], []

    for conv in data["conversation"]:
        human_text = conv["human"].strip()
        assistant_text = conv["assistant"].strip()

        # BUG FIX: the original template was "\n\nnAssistant:" — stray 'n'
        # corrupted the prompt separator for every sample.
        input_text = "Human:" + human_text + "\n\nAssistant:"

        input_tokenizer = tokenizer(
            input_text,
            add_special_tokens=False,
            truncation=True,
            padding=False,
            return_tensors=None,
        )
        output_tokenizer = tokenizer(
            assistant_text,
            add_special_tokens=False,
            truncation=True,
            padding=False,
            return_tensors=None,
        )

        input_ids += (
            input_tokenizer["input_ids"]
            + output_tokenizer["input_ids"]
            + [tokenizer.eos_token_id]
        )
        attention_mask += (
            input_tokenizer["attention_mask"]
            + output_tokenizer["attention_mask"]
            + [1]  # EOS token is attended to
        )
        # Mask the prompt with -100 so only assistant tokens contribute loss.
        labels += (
            [-100] * len(input_tokenizer["input_ids"])
            + output_tokenizer["input_ids"]
            + [tokenizer.eos_token_id]
        )

    # Truncate all three streams in lockstep to the length cap.
    if len(input_ids) > max_seq_length:
        input_ids = input_ids[:max_seq_length]
        attention_mask = attention_mask[:max_seq_length]
        labels = labels[:max_seq_length]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }

if __name__ == "__main__":
    # Single-turn processing is currently disabled; enable when needed.
    # data_process()

    # Build the multi-turn JSONL training file.
    data_process_mutil()