# -*- coding: utf-8 -*-
# time: 2025/5/10 09:18
# file: tf01.py
# author: hanson
"""
小模型推理
Pythia-70m需要约1GB GPU内存

"""
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments

from turn_found.small_tf.tf02_dataSet_modify import get_modify_dataSet

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-70m")

# GPT-NeoX tokenizers ship without a pad token, so any call with padding=True
# (or padding="max_length") would fail. Reusing EOS as PAD is the standard
# remedy for causal-LM fine-tuning.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Load the fine-tuning dataset from the project helper.
# NOTE(review): the original also downloaded "lamini/lamini_docs" via
# load_dataset() here, but that result was never used anywhere in this file;
# the dead download has been removed.
dicts = get_modify_dataSet(1)
datasets = dicts["dataSet"]

# Sanity-check the loaded splits.
print("Last train sample:", datasets["train"][-1])  # inspect last training row
print("Last test sample:", datasets["test"][-1])  # inspect last test row


def preprocess_function(examples):
    """Tokenize a batch of QA pairs for causal-LM fine-tuning.

    Builds one text per example ("question: <q>\nanswer: <a>"), tokenizes it,
    and sets ``labels`` to a copy of ``input_ids`` with padding positions
    masked to -100 so the loss ignores them.

    Args:
        examples: batch dict with "question" and "answer" lists of strings.

    Returns:
        dict with "input_ids", "attention_mask" and "labels" as plain lists
        (the format ``datasets.Dataset.map`` expects; the data collator
        converts to tensors at batch time).
    """
    # A causal LM must see prompt + answer as ONE sequence. The original
    # `text_target=` call produced labels tokenized from the answers alone,
    # which are not aligned token-by-token with the inputs, so the LM loss
    # was meaningless.
    texts = [
        "question: " + q + "\nanswer: " + a
        for q, a in zip(examples["question"], examples["answer"])
    ]
    model_inputs = tokenizer(
        texts,
        max_length=512,
        truncation=True,
        padding="max_length",
        # No return_tensors here: Dataset.map stores python lists anyway.
    )
    # Mask pad positions with -100 (ignored by CrossEntropyLoss). This matters
    # because pad_token == eos_token, so unmasked pads would be trained on.
    model_inputs["labels"] = [
        [tok if mask == 1 else -100 for tok, mask in zip(ids, attn)]
        for ids, attn in zip(
            model_inputs["input_ids"], model_inputs["attention_mask"]
        )
    ]
    return model_inputs


# Tokenize both splits. remove_columns drops the raw string columns
# ("question"/"answer"), which the default data collator cannot convert to
# tensors and would otherwise crash on.
training_dataset = datasets["train"].map(
    preprocess_function,
    batched=True,
    remove_columns=datasets["train"].column_names,
)
test_dataset = datasets["test"].map(
    preprocess_function,
    batched=True,
    remove_columns=datasets["test"].column_names,
)

training_args = TrainingArguments(
    learning_rate=5e-5,
    num_train_epochs=3,
    # NOTE: max_steps silently overrides num_train_epochs when both are set;
    # kept at 5 as a smoke-test cap — raise or remove it for a real run.
    max_steps=5,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,
    output_dir="my_finetuned",
    logging_steps=10,
    save_strategy="epoch",
    # fp16 requires a CUDA device; enabling it unconditionally raises a
    # ValueError on CPU-only machines, so gate it on GPU availability.
    fp16=torch.cuda.is_available(),
    # NOTE(review): eval_steps has no effect unless a step-based evaluation
    # strategy is also enabled (evaluation_strategy="steps"; renamed
    # eval_strategy in newer transformers) — confirm intended behavior.
    eval_steps=200,
)

# Optional: LoRA parameter-efficient fine-tuning.
# lora_config = LoraConfig(
#     r=8,
#     lora_alpha=32,
#     target_modules=["q_proj", "k_proj", "v_proj"],
#     lora_dropout=0.05,
#     bias="none",
#     task_type="CAUSAL_LM"
# )
# model = get_peft_model(model, lora_config)
# model.print_trainable_parameters()  # show trainable-parameter ratio

# 5. Train.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=training_dataset,
    eval_dataset=test_dataset,
    tokenizer=tokenizer,  # lets Trainer save the tokenizer with checkpoints
)
train_output = trainer.train()

# Persist model and tokenizer together so the directory can be reloaded
# directly with from_pretrained().
trainer.save_model("./my_finetuned")
tokenizer.save_pretrained("./my_finetuned")