import json
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
from tqdm import tqdm

# 处理数据集，这次使用单轮对话来微调
## 合并数据集，其中一个数据集大概1.5w，两个合并大概3w条数据，回复比较人性化，观察最后微调的结果是否能够像数据集一样变得比较人性化
def len_data(data_path):
    """Return the number of top-level entries in a JSON file.

    Args:
        data_path: Path to a JSON file containing a list (or dict) of records.

    Returns:
        The number of entries (``len`` of the loaded object).
    """
    # Use explicit UTF-8 to match every other file access in this module;
    # relying on the platform default encoding breaks on non-UTF-8 locales.
    with open(data_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    return len(data)

# 合并数据集，总共3w条数据
def merge_data(data1_path, data2_path, output_path='./data/merged_data.json'):
    """Concatenate two JSON list datasets and write the result to one file.

    Args:
        data1_path: Path to the first JSON file (must contain a list).
        data2_path: Path to the second JSON file (must contain a list).
        output_path: Where to write the merged JSON. Defaults to the
            original hard-coded location for backward compatibility.

    Returns:
        The merged list of records.
    """
    # Read the first JSON file.
    with open(data1_path, 'r', encoding='utf-8') as file:
        data1 = json.load(file)

    # Read the second JSON file.
    with open(data2_path, 'r', encoding='utf-8') as file:
        data2 = json.load(file)

    # Concatenate the two lists (order: data1 first, then data2).
    merged_data = data1 + data2

    # Persist the merged dataset; ensure_ascii=False keeps CJK text readable.
    with open(output_path, 'w', encoding='utf-8') as file:
        json.dump(merged_data, file, ensure_ascii=False, indent=4)

    # Report the merged entry count.
    print(f"合并后的数据集包含 {len(merged_data)} 条数据。")
    return merged_data

# 处理数据，把原数据集格式更改，然后保存成jsonl格式，好进行后续的处理
def data_process(data_path, output_path):
    """Convert raw {"prompt", "completion"} records into single-turn
    conversation JSONL.

    Each valid input record becomes one line:
    ``{"conversation_id": <1-based index>, "conversation": [{"human": ...,
    "assistant": ...}]}``. Records missing either field are skipped
    (their index is still consumed, so IDs may have gaps — unchanged
    from the original behavior).

    Args:
        data_path: Path to a JSON file containing a list of raw records.
        output_path: Path of the JSONL file to write.
    """
    with open(data_path, 'r', encoding='utf-8') as file:
        data_json = json.load(file)

    # Build the converted records first, then write once.
    # (Previously the output file was opened for writing twice: an inner
    # `with open(output_path, 'w')` truncated it without writing anything,
    # then it was reopened below — the redundant open is removed.)
    all_results = []
    for i, data in enumerate(data_json):
        try:
            # Single-turn dialogue, so no inner loop is needed.
            turn = {"human": data["prompt"], "assistant": data["completion"]}
        except KeyError:
            # Skip records without both "prompt" and "completion" fields.
            continue
        all_results.append({"conversation_id": i + 1, "conversation": [turn]})

    # Write one JSON object per line (JSONL).
    with open(output_path, 'w', encoding='utf-8') as file:
        for item in tqdm(all_results, desc="Writing to File"):
            file.write(json.dumps(item, ensure_ascii=False) + '\n')

    # Report the number of records actually written (len(data_json) would
    # over-count when malformed records were skipped).
    print(f"将 {len(all_results)} 条数据保存到{output_path}中")

# 按照微调的要求预处理数据
def finetune_data(data: dict, tokenizer, max_seq_length: int) -> dict:
    """Tokenize one conversation sample for causal-LM fine-tuning.

    For every turn, the prompt is rendered as ``"human:<text>\\n\\nassistant:"``,
    tokenized together with the assistant reply, and an EOS token is appended.
    Prompt positions are masked out of the loss with -100 so only the
    assistant reply (and EOS) contribute to training.

    Args:
        data: A record with a "conversation" list of
            {"human": str, "assistant": str} turns.
        tokenizer: A HuggingFace-style tokenizer (callable returning
            "input_ids"/"attention_mask" lists; exposes ``eos_token_id``).
        max_seq_length: Hard cap on the final sequence length; longer
            sequences are truncated from the right.

    Returns:
        Dict with equal-length "input_ids", "attention_mask" and "labels"
        lists.
    """
    input_ids, attention_mask, labels = [], [], []

    for conv in data["conversation"]:
        # Render the prompt side of this turn.
        input_text = "human:" + conv["human"] + "\n\nassistant:"

        input_tokenizer = tokenizer(
            input_text,
            add_special_tokens=False,
            truncation=True,
            padding=False,
            return_tensors=None,
        )
        output_tokenizer = tokenizer(
            conv["assistant"],
            add_special_tokens=False,
            truncation=True,
            padding=False,
            return_tensors=None,
        )

        input_ids += (
            input_tokenizer["input_ids"] + output_tokenizer["input_ids"] + [tokenizer.eos_token_id]
        )
        attention_mask += input_tokenizer["attention_mask"] + output_tokenizer["attention_mask"] + [1]
        # Mask the prompt tokens with -100; learn only on the reply + EOS.
        labels += (
            [-100] * len(input_tokenizer["input_ids"]) + output_tokenizer["input_ids"] + [tokenizer.eos_token_id]
        )

    if len(input_ids) > max_seq_length:  # Truncate to the configured maximum.
        input_ids = input_ids[:max_seq_length]
        attention_mask = attention_mask[:max_seq_length]
        labels = labels[:max_seq_length]
    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

if __name__ == '__main__':
    # Locations of the merged raw dataset and the processed JSONL output.
    data_path = './data/merged_data.json'
    output_path = './data/single_datas.jsonl'
    # data_process(data_path, output_path)

    # Load the tokenizer from the local model directory.
    model_path = "./model"
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # Read the JSONL samples (one JSON object per line).
    with open(output_path, "r", encoding="utf-8") as f:
        samples = [json.loads(line) for line in f]

    # Smoke-test the preprocessing on the first sample.
    data = samples[0]
    print(data)
    max_seq_length = 2048
    print(finetune_data(data, tokenizer, max_seq_length))
