'''
lora+可视化
transformers库 + swanlab库（==plot）
swanlab:
https://github.com/SwanHubX/SwanLab
https://docs.swanlab.cn/guide_cloud/integration/integration-huggingface-transformers.html
https://blog.csdn.net/SoulmateY/article/details/138539272

https://swanlab.cn/@zhangxiancai
'''

from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq
import torch

from peft import LoraConfig, TaskType, get_peft_model

from datasets import Dataset # pip install datasets==2.18.0 （hugface）

def get_swanlab_callback(eval_df=None, eval_model=None, eval_tokenizer=None):
    """Create a SwanLab callback that also logs qualitative predictions.

    The returned callback runs chat-style inference on a few held-out rows
    before training starts and at the end of every epoch, printing the results
    and logging them to SwanLab as text.

    Args:
        eval_df: DataFrame with 'instruction'/'input'/'output' columns.
            Defaults to the module-level ``test_df``.
        eval_model: model used for the qualitative inference. Defaults to the
            module-level ``peft_model``.
        eval_tokenizer: tokenizer matching the model. Defaults to the
            module-level ``tokenizer``.

    Returns:
        A configured ``SwanLabCallback`` subclass instance.
    """
    # pip install swanlab==0.3.25
    import swanlab
    from swanlab.integration.transformers import SwanLabCallback

    # Fall back to the module-level objects. This function is called after
    # they are created (see the training section below), so the lookups are
    # safe at call time even though the names are defined later in the file.
    if eval_df is None:
        eval_df = test_df
    if eval_model is None:
        eval_model = peft_model
    if eval_tokenizer is None:
        eval_tokenizer = tokenizer

    class HuanhuanSwanLabCallback(SwanLabCallback):
        def predict(self, messages, model, tokenizer):
            """Generate one assistant reply for `messages` during training."""
            # Use the model's actual device instead of hard-coding "cuda",
            # so training-time inference also works on CPU-only machines.
            device = model.device
            text = tokenizer.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
            model_inputs = tokenizer([text], return_tensors="pt").to(device)

            generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512)
            # Strip the prompt tokens so only the newly generated reply is decoded.
            generated_ids = [
                output_ids[len(input_ids):]
                for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
            ]
            response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

            # Release generation buffers, but only when we actually ran on CUDA.
            if device.type == "cuda":
                with torch.cuda.device(device):
                    torch.cuda.empty_cache()  # free cached blocks
                    torch.cuda.ipc_collect()  # reclaim IPC memory
            return response

        def on_train_begin(self, args, state, control, model=None, **kwargs):
            """Log a few zero-shot predictions before any fine-tuning happens."""
            if not self._initialized:
                self.setup(args, state, model, **kwargs)

            print("训练开始")
            print("未开始微调，先取3条主观评测：")
            test_text_list = []
            for index, row in eval_df[:1].iterrows():
                instruction = row["instruction"]
                input_value = row["input"]

                messages = [
                    {"role": "system", "content": f"{instruction}"},
                    {"role": "user", "content": f"{input_value}"},
                ]

                response = self.predict(messages, eval_model, eval_tokenizer)
                messages.append({"role": "assistant", "content": f"{response}"})

                result_text = f"【Q】{messages[1]['content']}\n【LLM】{messages[2]['content']}\n"
                print(result_text)

                test_text_list.append(swanlab.Text(result_text, caption=response))

            swanlab.log({"训练前推理": test_text_list}, step=0)

        def on_epoch_end(self, args, state, control, **kwargs):
            """Log predictions alongside the ground truth after each epoch."""
            test_text_list = []
            for index, row in eval_df[:1].iterrows():
                instruction = row["instruction"]
                input_value = row["input"]
                ground_truth = row["output"]

                messages = [
                    {"role": "system", "content": f"{instruction}"},
                    {"role": "user", "content": f"{input_value}"},
                ]

                response = self.predict(messages, eval_model, eval_tokenizer)
                messages.append({"role": "assistant", "content": f"{response}"})

                if index == 0:
                    print("epoch", round(state.epoch), "主观评测：")

                result_text = f"【Q】{messages[1]['content']}\n【LLM】{messages[2]['content']}\n【GT】 {ground_truth}"
                print(result_text)

                test_text_list.append(swanlab.Text(result_text, caption=response))

            # state.epoch is a float; round it to use the epoch number as the step.
            swanlab.log({"训练后推理": test_text_list}, step=round(state.epoch))

    swanlab_callback = HuanhuanSwanLabCallback(
        project="Qwen2.5-Coder-LoRA-Law",
        experiment_name="7b",
        config={
            # NOTE(review): this links the 7B model but the script loads a local
            # 0.5B checkpoint — confirm which model this experiment tracks.
            "model": "https://modelscope.cn/models/Qwen/Qwen2.5-Coder-7B-Instruct",
            "dataset": "https://huggingface.co/datasets/ShengbinYue/DISC-Law-SFT",
            "github": "https://github.com/datawhalechina/self-llm",
            "system_prompt": "你是一个法律专家，请根据用户的问题给出专业的回答",
            # Kept in sync with the actual LoraConfig defined later in this
            # script (previously logged as rank=64 / alpha=16, which did not
            # match the real r=4 / lora_alpha=32).
            "lora_rank": 4,
            "lora_alpha": 32,
            "lora_dropout": 0.1,
        },
    )
    return swanlab_callback


# LoRA training pipeline
# 0 Load the pretrained model and its tokenizer.
print('0 载入预训练模型')
pretrained_model_name_or_path = r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map="auto", torch_dtype=torch.float16)
model.enable_input_require_grads()  # required when gradient checkpointing is enabled (see TrainingArguments below)

# 1 Load the dataset
print('1 载入数据集')
def process_func(example, tokenizer=None, max_length=384):
    """Tokenize one instruction-tuning example into Qwen chat format.

    Builds the ``<|im_start|>system ... user ... assistant`` prompt tokens
    followed by the target answer. Prompt positions are masked with -100 in
    the labels so the loss is only computed on the answer tokens.

    Args:
        example: dict with 'instruction', 'input' and 'output' keys.
        tokenizer: tokenizer to use; defaults to the module-level ``tokenizer``.
        max_length: truncation length (previously a hard-coded 384 constant).

    Returns:
        dict with 'input_ids', 'attention_mask' and 'labels' lists.
    """
    if tokenizer is None:
        # Fall back to the tokenizer created at module level.
        tokenizer = globals()["tokenizer"]
    instruction = tokenizer(
        f"<|im_start|>system\n{example['instruction']}<|im_end|>\n<|im_start|>user\n{example['input']}<|im_end|>\n<|im_start|>assistant\n",
        add_special_tokens=False,
    )
    response = tokenizer(f"{example['output']}", add_special_tokens=False)
    # NOTE(review): pad_token_id is appended as the final (EOS-like) token;
    # for some models pad and eos differ — confirm eos_token_id is not
    # intended here.
    input_ids = (
            instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
    )
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    labels = (
            [-100] * len(instruction["input_ids"])  # mask the prompt from the loss
            + response["input_ids"]
            + [tokenizer.pad_token_id]
    )
    if len(input_ids) > max_length:  # truncate to the maximum sequence length
        input_ids = input_ids[:max_length]
        attention_mask = attention_mask[:max_length]
        labels = labels[:max_length]

    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

# Full dataset:
# dataset_path = r"D:\code\other\LLMs\algorithms\train\data\DISC-Law-SFT-Pair-QA-released-new.jsonl"
# 200-sample subset for quick runs:
dataset_path = r"D:\code\other\LLMs\algorithms\train\data\DISC-Law-SFT-Pair-QA-released-new200.jsonl"
train_ds = Dataset.from_json(dataset_path)
train_dataset = train_ds.map(process_func)  # tokenize every example
import pandas as pd
test_df = pd.read_json(dataset_path, lines=True)[:5]  # first 5 rows (DataFrame), used for qualitative eval


# 2 Build the LoRA model
print('2 构造lora模型')
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    # All attention and MLP projection layers of the Qwen2 architecture.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=False, # training mode
    r=4, # LoRA rank
    lora_alpha=32, # LoRA alpha (scaling factor; see the LoRA paper for details)
    lora_dropout=0.1  # dropout probability applied to the LoRA layers
)
peft_model = get_peft_model(model, config)

# 3 Train
print('3 训练')
args = TrainingArguments(
    output_dir="./output/Qwen2.5-Coder-0.5B-Instruct-lora",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,  # effective batch size per device = 2 * 4 = 8
    logging_steps=10,
    num_train_epochs=2,
    save_steps=10, # set low (10) for a quick demo; 100+ is recommended for real runs
    learning_rate=1e-4,
    save_on_each_node=True,
    gradient_checkpointing=True  # requires model.enable_input_require_grads() called above
)
swanlab_callback = get_swanlab_callback()
trainer = Trainer(
    model=peft_model,
    args=args, # training hyperparameters
    train_dataset=train_dataset,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True), # dynamic padding per batch
    callbacks=[swanlab_callback], # SwanLab visualization callback
)
trainer.train()






