'''
LoRA fine-tuning pipeline based on peft.

Loads a pretrained causal LM and its tokenizer, preprocesses a JSONL
Q&A dataset into Qwen chat-template token ids, wraps the model with
LoRA adapters, and trains it with the HuggingFace Trainer.
'''
import os

from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq
import torch

from peft import LoraConfig, TaskType, get_peft_model

from datasets import Dataset # pip install datasets==2.18.0 (HuggingFace)



# pretrained_model_name_or_path = r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct' # model path (local Windows dev)
# save_path = r'D:\code\other\LLMs\local_data\train_output' # output path
# dataset_path = r"D:\code\other\LLMs\local_data\output.jsonl" # data path

pretrained_model_name_or_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2.5-14B-Instruct'  # base model path
save_path = r'/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/train_output' # training output (checkpoints) path
dataset_path = r"/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/question_response_0.jsonl"  # JSONL dataset path


# 0 Load the pretrained model and tokenizer
# os.environ['PYTORCH_CUDA_ALLOC_CONF']='expandable_segments:True'
print('0 载入预训练模型')

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map="auto", torch_dtype=torch.float16)
model.enable_input_require_grads()  # required when gradient checkpointing is enabled (see TrainingArguments below)
# 1 载入数据集
print('1 载入数据集')
# def process_func(example):
#     """
#     将数据集进行预处理
#     千问llm模型
#     """
#     MAX_LENGTH = 384
#     input_ids, attention_mask, labels = [], [], []
#     instruction = tokenizer(
#         f"<|im_start|>system\n{example['instruction']}<|im_end|>\n<|im_start|>user\n{example['input']}<|im_end|>\n<|im_start|>assistant\n",
#         add_special_tokens=False,
#     )
#     response = tokenizer(f"{example['output']}", add_special_tokens=False)
#     input_ids = (
#             instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
#     )
#     attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
#     labels = (
#             [-100] * len(instruction["input_ids"])
#             + response["input_ids"]
#             + [tokenizer.pad_token_id]
#     ) # label mask
#     if len(input_ids) > MAX_LENGTH:  # 做一个截断
#         input_ids = input_ids[:MAX_LENGTH]
#         attention_mask = attention_mask[:MAX_LENGTH]
#         labels = labels[:MAX_LENGTH]
#
#     return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}
#
# # dataset_path = r"D:\code\other\LLMs\algorithms\train\data\DISC-Law-SFT-Pair-QA-released-new.jsonl"
# dataset_path = r"D:\code\other\LLMs\algorithms\train\data\DISC-Law-SFT-Pair-QA-released-new200.jsonl"
# train_ds = Dataset.from_json(dataset_path)
# train_dataset = train_ds.map(process_func)
# pass


# 项目代码jsonl数据 预处理
# item_dict = {
#     'pathx': pathx,  # 文件路径
#     'lenx': lenx,  # 文件长度
#     'content': contentx,  # 文件内容
#     'question': [],  # 询问内容
#     'response': [],  # 响应内容
#     # 'meaning': [],  # 解释内容
# }
def process_func_2(example):
    """Convert one Q&A record into model-ready training features.

    Builds the Qwen chat template around the question and its supporting
    content, tokenizes the prompt and the answer separately so the prompt
    tokens can be masked out of the loss, then truncates everything to a
    fixed maximum length.

    Args:
        example: mapping with 'question', 'content' and 'response' keys.

    Returns:
        dict with 'input_ids', 'attention_mask' and 'labels' lists.
    """
    max_len = 15000
    user_prompt = f"问题：{example['question']}\n结合以下资料回答问题，不超过5句话:\n{example['content']}"
    prompt = tokenizer(
        f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n",
        add_special_tokens=False,
    )
    answer = tokenizer(f"{example['response']}", add_special_tokens=False)

    # pad_token_id (151643 for Qwen) doubles as the end-of-text marker here.
    input_ids = prompt["input_ids"] + answer["input_ids"] + [tokenizer.pad_token_id]
    attention_mask = prompt["attention_mask"] + answer["attention_mask"] + [1]
    # Mask the prompt with -100 so only the answer contributes to the loss.
    labels = [-100] * len(prompt["input_ids"]) + answer["input_ids"] + [tokenizer.pad_token_id]

    # Hard truncation to cap sequence length (slicing past the end is a no-op).
    return {
        "input_ids": input_ids[:max_len],
        "attention_mask": attention_mask[:max_len],
        "labels": labels[:max_len],
    }

# 1b Load the JSONL dataset and flatten it: each source record holds
# parallel 'question'/'response' lists; expand into one record per pair.
raw_dataset = Dataset.from_json(dataset_path)

flattened_records = []
for record in raw_dataset:
    for idx in range(len(record['question'])):
        flattened_records.append({
            'pathx': record['pathx'],            # file path
            'lenx': record['lenx'],              # file length
            'content': record['content'],        # file content (repeated per pair)
            'question': record['question'][idx], # single question
            'response': record['response'][idx], # matching response
        })

train_dataset = Dataset.from_list(flattened_records)
train_dataset = train_dataset.map(process_func_2)



# 2 Wrap the base model with LoRA adapters
print('2 构造lora模型')
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,  # training mode (adapters trainable)
    r=4,                   # LoRA rank
    lora_alpha=32,         # LoRA scaling factor (alpha)
    lora_dropout=0.1,      # dropout applied to the LoRA layers
    # Attention and MLP projection layers to adapt.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)
model = get_peft_model(model, lora_config)

# 3 Run LoRA training
print('3 训练')
training_args = TrainingArguments(
    output_dir=save_path,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,   # effective batch = 4 * 4 per device
    num_train_epochs=20,
    learning_rate=1e-4,
    logging_steps=2,
    save_steps=2,                    # checkpoints very frequently (demo); raise (e.g. 100) for real runs
    save_on_each_node=True,
    gradient_checkpointing=True,     # requires model.enable_input_require_grads() (done at load time)
    # Distributed-training parameter: torchrun sets LOCAL_RANK; -1 means single process.
    local_rank=int(os.getenv('LOCAL_RANK', -1)),
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    # Collator pads input_ids/attention_mask/labels to the batch max length.
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
)
trainer.train()

# torchrun --nproc_per_node=NUM_GPUS_YOU_HAVE your_training_script.py