# -*- coding: utf-8 -*-
# time: 2025/4/10 16:19
# file: finetune_qwen_hf.py.py
# author: hanson
import os

import torch
# Point the HF hub at a mirror. This must be an *environment variable* set
# BEFORE transformers/datasets are imported to take effect; the original code
# only read it into a Python variable, which did nothing. setdefault keeps any
# endpoint the user already exported and still binds the module-level name.
HF_ENDPOINT = os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from peft import LoraConfig, get_peft_model
from datasets import load_dataset


# 1. 加载数据集（使用HuggingFace Datasets）
# 1. Load the dataset (via HuggingFace Datasets)
def load_data(num_samples=10):
    """Load the Chinese medical-dialogue training split and return a slice.

    Args:
        num_samples: number of examples to keep (default 10, matching the
            previous hard-coded behavior; the old inline comment incorrectly
            said 2000). Capped at the dataset size.

    Returns:
        A ``datasets.Dataset`` with at most ``num_samples`` rows.
    """
    dataset = load_dataset(
        "Roselia-penguin/medical-dialogue-Chinese",
        split="train"
    )
    # Cap so a short dataset never triggers an out-of-range select().
    return dataset.select(range(min(num_samples, len(dataset))))


# 2. 数据预处理
# 2. Data preprocessing
def preprocess_data(tokenizer, dataset):
    """Format instruction/input/output rows into prompt text, then tokenize.

    Args:
        tokenizer: a HuggingFace tokenizer; ``pad_token`` must already be set
            because ``padding="max_length"`` is used below.
        dataset: a ``datasets.Dataset`` with "instruction", "input", "output"
            columns (values may be None).

    Returns:
        A tokenized ``datasets.Dataset`` (input_ids / attention_mask columns).
    """
    def format_example(ex):
        # Guard against None values in the raw data.
        instruction = ex["instruction"] or ""
        input_text = ex["input"] or ""
        output = ex["output"] or ""

        text = f"Instruction: {instruction.strip()}\n"
        if input_text:  # the "Input:" line is optional
            text += f"Input: {input_text.strip()}\n"
        text += f"Output: {output.strip()}"
        return {"text": text}

    formatted = dataset.map(format_example, remove_columns=dataset.column_names)

    def tokenize_function(examples):
        # NOTE: no return_tensors="pt" here — Dataset.map converts outputs
        # back to plain Python lists anyway, so requesting torch tensors is
        # wasted work and can raise on ragged batches if the padding strategy
        # ever changes. The Trainer's data collator produces the tensors.
        return tokenizer(
            examples["text"],
            truncation=True,
            max_length=512,
            padding="max_length",
        )

    return formatted.map(
        tokenize_function,
        batched=True,
        num_proc=4,
        remove_columns=["text"]  # drop the raw text column
    )


# 3. Load the base model and tokenizer (downloads from the HF hub/mirror).
model_id = "qwen/qwen2-1.5b-instruct"
#model_id = r"E:\soft\model\qwen\Qwen\Qwen2___5-0___5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
# Ensure a pad token exists (important!) — padding="max_length" in
# preprocessing and the data collator both require one; fall back to EOS.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # load weights in bfloat16
    device_map="auto",  # let accelerate place layers on available devices
    trust_remote_code=True
)

# 4. Configure LoRA: only low-rank adapters are trained; base weights frozen.
peft_config = LoraConfig(
    r=8,  # adapter rank
    lora_alpha=32,  # scaling factor (effective scale alpha/r = 4)
    # Attention projections only. NOTE(review): Qwen2 recipes often also
    # target o_proj and the MLP projections — confirm this is intentional.
    target_modules=["q_proj", "k_proj", "v_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM"
)
model = get_peft_model(model, peft_config)


# 5. 训练
def train():
    dataset = load_data()
    tokenized_data = preprocess_data(tokenizer, dataset)
    # 数据校对器（关键修改）
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=False,
        pad_to_multiple_of=8
    )
    trainer = Trainer(
        model=model,
        args=TrainingArguments(
            output_dir="./qwen2-0.5b-finetuned2",
            per_device_train_batch_size=4,
            gradient_accumulation_steps=2,
            learning_rate=2e-5,
            num_train_epochs=1,
            logging_steps=10,
            save_strategy="epoch",
            #fp16=True,
            bf16=torch.cuda.is_bf16_supported(),  # 使用bfloat16如果支持
            remove_unused_columns=True,  # 改为True
            report_to="tensorboard"
           # optim="adamw_torch"  # 显式指定优化器
        ),
        data_collator=data_collator,
        train_dataset=tokenized_data,
    )
    trainer.train()

    # 保存完整模型（合并适配器）
    model.save_pretrained("./qwen2-0.5b-finetuned2")
    tokenizer.save_pretrained("./qwen2-0.5b-finetuned2")
    print("模型已保存到 ./qwen2-0.5b-finetuned2")


# Script entry point: kick off fine-tuning when run directly.
if __name__ == "__main__":
    train()
