import os

import torch
import torch.nn as nn

from datasets import Dataset
from transformers import GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments

# cache_dir = "./models",  # directory to download the model into
# local_files_only = False  # allow downloading from the network

# LoRA module: a low-rank adapter computing delta(x) = B(A(x)).
class LoRAModule(nn.Module):
    def __init__(self, input_dim, output_dim, rank=4):
        """Low-rank update for a Linear(input_dim, output_dim) layer.

        Args:
            input_dim: input feature size of the wrapped linear layer.
            output_dim: output feature size of the wrapped linear layer.
            rank: bottleneck dimension of the low-rank decomposition.
        """
        super(LoRAModule, self).__init__()
        self.A = nn.Linear(input_dim, rank, bias=False)   # down-projection A
        self.B = nn.Linear(rank, output_dim, bias=False)  # up-projection B
        # BUG FIX: per the LoRA paper, B must start at zero so the adapter
        # contributes exactly 0 before training and the pretrained model's
        # behavior is unchanged at step 0. The default Linear init made the
        # adapter perturb the base model immediately.
        nn.init.zeros_(self.B.weight)

    def forward(self, x):
        # Adapter delta B(A(x)); exactly zero at initialization.
        return self.B(self.A(x))
# Inject a LoRA adapter alongside every nn.Linear layer of a base model.
class GPTWithLoRA(nn.Module):
    def __init__(self, base_model, rank=4):
        """Wrap base_model and create one LoRAModule per nn.Linear layer.

        Args:
            base_model: a causal-LM model (e.g. GPT2LMHeadModel). Note that
                in GPT-2 most projections are Conv1D, so typically only the
                lm_head is matched here.
            rank: LoRA bottleneck rank passed to each adapter.
        """
        super(GPTWithLoRA, self).__init__()
        self.base_model = base_model
        self.lora_modules = nn.ModuleDict()  # sanitized name -> adapter
        for name, module in base_model.named_modules():
            if isinstance(module, nn.Linear):  # insert LoRA only on linear layers
                # BUG FIX: nn.ModuleDict rejects keys containing ".", and
                # named_modules() yields dotted paths (e.g. "transformer.h.0.x"),
                # so the original crashed here. Store under a sanitized key.
                key = name.replace(".", "_")
                self.lora_modules[key] = LoRAModule(
                    module.in_features, module.out_features, rank)

    def forward(self, input_ids, attention_mask=None, labels=None):
        # BUG FIX: GPT2LMHeadModel's output object has no `last_hidden_state`
        # field; hidden states must be requested explicitly, then the final
        # layer's state is used.
        outputs = self.base_model(input_ids=input_ids,
                                  attention_mask=attention_mask,
                                  labels=labels,
                                  output_hidden_states=True)
        hidden = outputs.hidden_states[-1]
        lora_output = 0
        for key, lora in self.lora_modules.items():
            # BUG FIX: the adapter maps in_features -> out_features, so it must
            # be applied to the layer's *input* activation; the original fed it
            # the layer's output (width out_features), a dimension mismatch.
            # NOTE(review): the final hidden state stands in for each layer's
            # input here — a simplification kept from the original design.
            lora_output += lora(hidden)
        return outputs.loss, lora_output
# Load the pretrained model and tokenizer from the hub (or local cache).
MODEL_NAME = "openai-community/gpt2"
CACHE_DIR = "i:/models/gpt2"

base_model = GPT2LMHeadModel.from_pretrained(
    MODEL_NAME, cache_dir=CACHE_DIR, local_files_only=False)
tokenizer = GPT2Tokenizer.from_pretrained(
    MODEL_NAME, cache_dir=CACHE_DIR, local_files_only=False)
# GPT-2 ships no padding token; reuse EOS so batch padding works.
tokenizer.pad_token = tokenizer.eos_token
lora_model = GPTWithLoRA(base_model, rank=4)

# Prepare the (tiny) training corpus.
training_texts = [
    "今天是个好天气。",
    "我喜欢用GPT模型学习。",
    "微调技术让模型更加灵活。",
    "LoRA 技术是一种高效的微调方法。",
    "通过低秩矩阵分解减少参数量。",
]
data = {"text": training_texts}

# Wrap the raw texts in a Hugging Face Dataset.
dataset = Dataset.from_dict(data)

def preprocess_function(examples):
    """Tokenize a batch of texts and attach causal-LM labels.

    Args:
        examples: batch dict from ``datasets.map`` with a "text" field.

    Returns:
        dict with ``input_ids``, ``attention_mask`` and ``labels``.
    """
    tokens = tokenizer(examples["text"],
                       padding="max_length",
                       truncation=True,
                       max_length=512)
    # BUG FIX: the original produced no "labels" column, so the model's loss
    # would be None and Trainer training would fail. For causal-LM fine-tuning
    # the labels are the input ids themselves (the model shifts internally).
    # NOTE(review): padding positions would ideally be masked with -100, but
    # pad_token == eos_token here, which would also mask the real EOS.
    tokens["labels"] = [ids.copy() for ids in tokens["input_ids"]]
    return tokens


# Drop the raw "text" column so only tensor-convertible fields remain.
tokenized_dataset = dataset.map(preprocess_function, batched=True,
                                remove_columns=["text"])

# Training hyper-parameters for the Hugging Face Trainer.
training_args = TrainingArguments(
    output_dir=r"I:\models\zy_models/results",  # where checkpoints are written
    logging_dir=r"I:\models\zy_models/logs",    # TensorBoard log directory
    num_train_epochs=3,
    per_device_train_batch_size=4,
    logging_steps=10,
    save_strategy="epoch",
)
# Train with the Hugging Face Trainer.
trainer = Trainer(
    model=lora_model,
    args=training_args,
    train_dataset=tokenized_dataset,
    tokenizer=tokenizer,
)

# Start training.
print("开始训练 LoRA 微调模型...")
trainer.train()

# Save the fine-tuned model.
print("保存模型...")
out_dir = r"I:\models\zy_models/lora_finetuned_model"
# BUG FIX: GPTWithLoRA is a plain nn.Module and has no save_pretrained();
# the original call raised AttributeError. Persist the state dict with
# torch.save instead (the tokenizer does provide save_pretrained).
os.makedirs(out_dir, exist_ok=True)
torch.save(lora_model.state_dict(),
           os.path.join(out_dir, "lora_finetuned_model.pt"))
tokenizer.save_pretrained(out_dir)
