'''
Training-dataset preprocessing and loading.

'''
import os
import json
from transformers import AutoTokenizer

from datasets import Dataset  # pip install datasets==2.18.0 (Hugging Face)

def get_dataset_mycode(is_map=True):
    '''
    Load and preprocess the project-code training dataset.

    Reads a JSONL file whose records hold parallel ``question``/``response``
    lists, flattens them into one record per Q/A pair (cached to a converted
    JSONL file so the flattening runs only once), and optionally tokenizes
    each record into Qwen chat-template tensors.

    :param is_map: if True (default), apply tokenization and return a Dataset
                   of input_ids / attention_mask / labels; if False, return
                   the raw flattened Dataset.
    :return: datasets.Dataset
    '''
    if os.name == 'nt':  # 'nt' identifies a Windows system (dev machine)
        pretrained_model_name_or_path = r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct'  # tokenizer path
        dataset_path = r"D:\code\other\LLMs\local_data\output.jsonl"  # raw data path
        # path of the converted (flattened) cache file
        output_converted_file_path = r'D:\code\other\LLMs\local_data\converted_output.jsonl'
    else:  # Linux training server
        pretrained_model_name_or_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2.5-Coder-0.5B-Instruct'  # tokenizer path
        dataset_path = r"/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/question_response_0.jsonl"  # raw data path
        # path of the converted (flattened) cache file
        output_converted_file_path = r'/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/converted_output.jsonl'

    # Tokenizer is used only for preprocessing, not for training itself.
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, use_fast=False,
                                              trust_remote_code=True)

    if not os.path.exists(output_converted_file_path):
        raw_dataset = Dataset.from_json(dataset_path)
        converted_jsonl = []
        for item_dict in raw_dataset:
            # 'question' and 'response' are parallel lists: emit one flat
            # record per Q/A pair, duplicating the file-level fields.
            for question, response in zip(item_dict['question'], item_dict['response']):
                converted_jsonl.append({
                    'pathx': item_dict['pathx'],  # source file path
                    'lenx': item_dict['lenx'],  # source file length
                    'content': item_dict['content'],  # source file content
                    'question': question,
                    'response': response,
                })
        # Cache the flattened records as JSONL so this branch runs only once.
        with open(output_converted_file_path, 'w', encoding='utf-8') as f:
            for item in converted_jsonl:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')

    # Always load from the converted cache file (freshly written or pre-existing).
    with open(output_converted_file_path, 'r', encoding='utf-8') as f:
        converted_jsonl = [json.loads(line) for line in f]

    train_dataset = Dataset.from_list(converted_jsonl)
    if not is_map:
        return train_dataset

    def process_func_2(example):
        """
        Tokenize one example into the Qwen chat format.

        Builds the <|im_start|>system/user/assistant template, masks every
        prompt token with -100 in labels, and appends one pad token so the
        label sequence is never entirely -100 (which would make the loss NaN).
        """
        MAX_LENGTH = 3000
        user_prompt = f"问题：{example['question']}\n结合以下资料回答问题，不超过5句话:\n{example['content']}"
        if len(user_prompt) > MAX_LENGTH:
            # Character-level (not token-level) truncation of the prompt only.
            user_prompt = user_prompt[:MAX_LENGTH]
        instruction = tokenizer(
            f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n",
            add_special_tokens=False,
        )
        response = tokenizer(f"{example['response']}", add_special_tokens=False)
        # NOTE(review): the original computed a pad-to-MAX_LENGTH count here and
        # then unconditionally overwrote it with 1; the dead computation was
        # removed. A single trailing pad token is kept so labels always contain
        # at least one non-masked entry (per the original "otherwise NaN" note).
        pad_nb = 1
        input_ids = (
                instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id] * pad_nb
        )
        attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1] * pad_nb
        labels = (
                [-100] * len(instruction["input_ids"])  # prompt tokens excluded from the loss
                + response["input_ids"]
                + [tokenizer.pad_token_id] * pad_nb
        )
        # NOTE(review): token sequences can still exceed MAX_LENGTH tokens since
        # only the prompt is truncated, at character level — confirm the
        # downstream collator/model tolerates long sequences.
        return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

    # column_names is robust even when the dataset is empty (the original
    # converted_jsonl[0].keys() raised IndexError on an empty cache file).
    train_dataset = train_dataset.map(process_func_2, remove_columns=train_dataset.column_names)

    return train_dataset