import os

import wandb
from accelerate import Accelerator
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
    default_data_collator,
)

# Pin the process to a single GPU (physical device 6).
os.environ["CUDA_VISIBLE_DEVICES"] = "6"

# Disable DeepSpeed integration in accelerate.
os.environ["ACCELERATE_USE_DEEPSPEED"] = "False"

# Custom accelerator configuration.
# device_placement=True ensures tensors are moved to the correct device.
accelerator = Accelerator(
    mixed_precision="fp16", 
    cpu=False, 
    deepspeed_plugin=None,
    device_placement=True
)

class DataLoader:
    """Loads a JSON conversation dataset and tokenizes it for causal-LM training."""

    def __init__(self, data_path=None):
        # Path to a JSON file whose rows carry a "conversations" list
        # (index 0 = user turn, index 1 = assistant turn) — see data_process.
        self.data_path = data_path

    def load_data(self, tokenizer=None):
        """Load the JSON file and return (train, test) splits (90/10).

        When a tokenizer is given, both splits are tokenized via data_process;
        otherwise the raw HF Dataset splits are returned.
        """
        dataset = load_dataset("json", data_files=self.data_path)
        split_dataset = dataset["train"].train_test_split(test_size=0.1)

        if tokenizer:
            processed_train_data = self.data_process(split_dataset["train"], tokenizer)
            processed_test_data = self.data_process(split_dataset["test"], tokenizer)
            return processed_train_data, processed_test_data
        else:
            return split_dataset["train"], split_dataset["test"]

    def data_process(self, datas, tokenizer):
        """Tokenize each sample into fixed-length input_ids/labels/attention_mask.

        BUG FIX: the original tokenized prompt and response each with
        padding='max_length', so the prompt alone already occupied all
        MAX_LENGTH positions; the response slice
        [:MAX_LENGTH - len(instruction)] was always empty and every label
        ended up -100 — the model received no training signal.  Prompt and
        response are now tokenized WITHOUT padding, concatenated, truncated,
        and right-padded once.  The attention mask is 0 on padding positions
        (the original used an all-ones mask that attended to pad tokens).
        """
        MAX_LENGTH = 384
        processed_data = []
        for data in datas:
            # System prompt + user turn in ChatML format.
            system_user_prompt = f"<|im_start|>system\n你是一名超级助手<|im_end|>\n<|im_start|>user\n{data['conversations'][0]['value']}<|im_end|>\n"
            instruction = tokenizer(system_user_prompt,
                                    add_special_tokens=False,
                                    truncation=True,
                                    max_length=MAX_LENGTH)

            # Assistant reply.
            assistant_response = f"<|im_start|>assistant\n{data['conversations'][1]['value']}<|im_end|>\n"
            response = tokenizer(assistant_response,
                                 add_special_tokens=False,
                                 truncation=True,
                                 max_length=MAX_LENGTH)

            # Concatenate prompt + response, truncating to the fixed length.
            input_ids = (instruction["input_ids"] + response["input_ids"])[:MAX_LENGTH]
            # Prompt tokens are excluded from the loss with -100.
            labels = ([-100] * len(instruction["input_ids"]) + response["input_ids"])[:MAX_LENGTH]
            # Real tokens attend (1); padding added below does not (0).
            attention_mask = [1] * len(input_ids)

            # Right-pad everything to MAX_LENGTH.
            padding_length = MAX_LENGTH - len(input_ids)
            if padding_length > 0:
                input_ids.extend([tokenizer.pad_token_id] * padding_length)
                labels.extend([-100] * padding_length)
                attention_mask.extend([0] * padding_length)

            processed_data.append({
                "input_ids": input_ids,
                "labels": labels,
                "attention_mask": attention_mask,
            })

        return processed_data

class ModelLoader:
    """Thin wrapper around HF from_pretrained for one local checkpoint path."""

    def __init__(self, model_path=None):
        # Filesystem path (or hub id) of the pretrained model.
        self.model_path = model_path

    def load_model(self):
        """Load the causal-LM weights (device_map='auto', returned in eval mode)."""
        model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            device_map="auto",
            trust_remote_code=True,
        )
        return model.eval()

    def load_tokenizer(self):
        """Load the tokenizer matching the model checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True)
    
class Predictor:
    """Single-turn chat generation with a model/tokenizer pair."""

    def __init__(self, model, tokenizer, max_new_tokens=100):
        self.model = model
        self.tokenizer = tokenizer
        self.max_new_tokens = max_new_tokens  # cap on generated tokens

    def predict(self, input_text):
        """Encode the chat messages, generate, and return only the new text."""
        prompt_ids = self.input_encode(input_text)
        generated = self.model.generate(prompt_ids, max_new_tokens=self.max_new_tokens)
        return self.output_decode(generated, prompt_ids)

    def input_encode(self, input_text):
        """Apply the chat template and tokenize to tensors on the model's device."""
        chat_prompt = self.tokenizer.apply_chat_template(
            input_text, add_generation_prompt=True, tokenize=False
        )
        encoded = self.tokenizer(chat_prompt, return_tensors="pt")
        return encoded.to(self.model.device).input_ids

    def output_decode(self, output_ids, input_ids):
        """Strip the prompt prefix from the generation and decode the rest."""
        new_tokens = output_ids[0][len(input_ids[0]):]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)

## 训练模型
## Model training
class BaseTrainer:
    """Wraps the HF Trainer for LoRA fine-tuning of a causal LM."""

    def __init__(self, model=None, tokenizer=None, data=None, save_path=None, wandb_logger=None):
        self.model = model
        self.tokenizer = tokenizer
        self.data = data                           # pre-tokenized dataset (input_ids/labels/attention_mask)
        self.save_path = save_path
        self.lora_config = self.get_lora_config()  # LoRA adapter configuration
        self.wandb_logger = wandb_logger
        # BUG FIX: DataCollatorForLanguageModeling(mlm=False) rebuilds labels
        # from input_ids, discarding the -100 instruction masking the dataset
        # already carries.  default_data_collator batches the provided labels
        # as-is (the dataset is already padded to a fixed length).
        self.data_collator = default_data_collator

    def get_train_args(self):
        """Build the TrainingArguments for this run."""
        return TrainingArguments(
            output_dir=self.save_path,              # model save path
            num_train_epochs=5,                     # training epochs
            per_device_train_batch_size=2,          # per-GPU batch size (tune to VRAM)
            gradient_accumulation_steps=8,          # gradient accumulation (simulates a larger batch)
            learning_rate=3e-4,                     # initial learning rate
            fp16=True,                              # mixed-precision training (saves VRAM)
            logging_steps=50,                       # logging interval
            save_steps=1000,                        # checkpoint interval
            evaluation_strategy="no",               # no evaluation (saves time)
            deepspeed=None,                         # disable DeepSpeed
        )

    def get_lora_config(self):
        """LoRA configuration: rank-8 adapters on the attention q/v projections."""
        lora_config = LoraConfig(
            task_type="CAUSAL_LM",
            target_modules=["q_proj", "v_proj"],
            inference_mode=False,
            r=8,
            lora_alpha=16,
            lora_dropout=0.1,
            bias="none",
        )
        return lora_config

    def train(self):
        """Wrap the model with LoRA adapters, run training, return the Trainer."""
        trainer_args = {
            "model": get_peft_model(self.model, self.lora_config),
            "args": self.get_train_args(),
            "train_dataset": self.data,
            "data_collator": self.data_collator,
        }

        # BUG FIX: Trainer has no 'callback_handler' keyword argument — passing
        # it raised a TypeError.  Extra callbacks go through 'callbacks'.
        # NOTE(review): the object must be a transformers TrainerCallback;
        # a raw wandb run handle will not work here — confirm at the call site.
        if self.wandb_logger is not None:
            trainer_args["callbacks"] = [self.wandb_logger]

        trainer = Trainer(**trainer_args)
        trainer.train()   # start training
        return trainer    # return the whole Trainer (callers use save_model)
    
class WandbCallback:
    """Helper that logs in to Weights & Biases and starts a run."""

    def __init__(self, project_name='None',
                 model_name='None',
                 api_key='None'):
        self.project_name = project_name  # W&B project to log under
        self.model_name = model_name      # run name shown in the W&B UI
        self.api_key = api_key            # W&B API key used for login

    def callback(self):
        """Return a fresh wandb run, or None if login/init fails."""
        # Best-effort: finish any session left over from a previous run.
        # BUG FIX: the bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        try:
            wandb.finish()
        except Exception:
            pass

        # Attempt login and run initialization.
        try:
            wandb.login(key=self.api_key)
            return wandb.init(project=self.project_name, name=self.model_name)
        except Exception as e:
            print(f"WandB登录失败: {e}")
            return None

## Load the model and tokenizer
model_loader = ModelLoader(model_path='/mnt/data2/ghz/models/Qwen2.5-7B-Instruct')
model = model_loader.load_model()
tokenizer = model_loader.load_tokenizer()

## Load and tokenize the training data
data_loader = DataLoader(data_path='/mnt/data2/ghz/data/Belle_sampled_qwen.json')
train_data, test_data = data_loader.load_data(tokenizer)

## Sanity-check generation before fine-tuning
predictor = Predictor(model, tokenizer)
messages = [{"role": "user", "content": "你好"}]
response = predictor.predict(messages)
print(response)

## Train the model
# NOTE(review): a live W&B API key was hard-coded in this comment; it has been
# redacted — rotate the key and load secrets from the environment instead.
# callback = WandbCallback(project_name='fineturning', model_name='qwen2.5-7b-instruct', api_key='<REDACTED>')
# wandb_logger = callback.callback()
trainer = BaseTrainer(model=model, tokenizer=tokenizer, data=train_data, save_path="./fineturned_models")
trained_trainer = trainer.train()

## Save the model via the Trainer API
trained_trainer.save_model("./fineturned_models")

