from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from datasets import load_dataset
import torch
from torch.utils.data import DataLoader
from configs.config import peft_config
import logging
from tqdm import tqdm
import os
from torch.nn.utils import clip_grad_norm_


# Base directory where model checkpoints are stored on disk.
model_dir = "/root/autodl-tmp/"

# Open-source reward model (CodeBERT-style), wrapped by CodeRewardWrapper below.
REWARD_MODEL_NAME = model_dir + "microsoft/codebert-base"  # Microsoft's open-source code-understanding model
tokenizer_name = model_dir + "meta-llama/Llama-2-7b-hf"  # policy (Llama-2) tokenizer path
# Dataset loading
def create_ppo_dataset():
    """Load the code-review dataset and turn it into PPO prompt records.

    Returns:
        A DatasetDict with 'train' and 'test' splits whose rows hold a
        "query" prompt string and the original code length.
    """
    dataset = load_dataset("codeparrot/github-code-review", split="train")

    # BUGFIX: filter *before* mapping. datasets.map() raises if the mapping
    # function returns None, so the original map-then-filter ordering could
    # never drop empty / too-short samples.
    dataset = dataset.filter(lambda ex: bool(ex["code"]) and len(ex["code"]) >= 10)

    def build_query(example):
        # Prompt template for the code-review task.
        return {
            "query": f"Review this Python code:\n{example['code']}\n### Response:",
            "code_length": len(example['code'])
        }

    dataset = dataset.map(build_query, remove_columns=dataset.column_names)

    # 90/10 train/validation split, seeded for reproducibility.
    dataset = dataset.train_test_split(test_size=0.1, seed=42)

    return dataset

# DataLoader initialisation
def create_dataloaders(dataset, batch_size=8):
    """Build train/validation DataLoaders over the 'train' and 'test' splits.

    Each batch is collated into a plain list of query strings; the training
    loader is shuffled, the validation loader is not.
    """
    def _collate(batch):
        # Keep only the prompt string from each sample.
        return [sample["query"] for sample in batch]

    def _loader(split, do_shuffle):
        return DataLoader(
            dataset[split],
            batch_size=batch_size,
            shuffle=do_shuffle,
            collate_fn=_collate,
        )

    return _loader("train", True), _loader("test", False)

# Build the dataset and loaders at import time (module-level side effect).
ppo_dataset = create_ppo_dataset()
train_dataloader, val_dataloader = create_dataloaders(ppo_dataset, batch_size=8)

# Load the QLoRA-fine-tuned policy checkpoint with a value head for PPO.
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    "./qlora-output",
    device_map="auto",
    torch_dtype=torch.float16,  # half precision to fit GPU memory
    peft_config=peft_config
)
# Policy-side (Llama-2) tokenizer, used for prompt encoding and decoding.
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

# Open-source reward model wrapper
class CodeRewardWrapper(torch.nn.Module):
    """Wrap a sequence-classification model as a scalar reward model.

    BUGFIX: the original encoded inputs with the module-level *policy*
    (Llama-2) tokenizer, whose vocabulary does not match the reward model's
    — producing meaningless token ids and therefore meaningless rewards.
    The wrapper now loads and uses the reward model's own tokenizer.
    """

    def __init__(self):
        super().__init__()
        # Single-logit classification head -> one scalar score per input.
        self.base_model = AutoModelForSequenceClassification.from_pretrained(
            REWARD_MODEL_NAME,
            num_labels=1
        )
        # The reward model must use its own tokenizer, not the policy's.
        self.reward_tokenizer = AutoTokenizer.from_pretrained(REWARD_MODEL_NAME)
        self.base_model.eval()  # inference only; never trained here

    def forward(self, text):
        """Return a (batch, 1) tensor of clamped reward scores for *text*.

        On any failure, logs the error and falls back to zero rewards so
        training can continue.
        """
        # Follow the model wherever it lives instead of hard-coding "cuda".
        device = next(self.base_model.parameters()).device
        try:
            inputs = self.reward_tokenizer(
                text,
                padding=True,
                truncation=True,
                max_length=512,
                return_tensors="pt"
            ).to(device)

            with torch.no_grad():
                outputs = self.base_model(**inputs).logits

            # Clamp extreme logits so a single outlier cannot dominate the
            # batch-level reward normalisation performed by the caller.
            rewards = torch.clamp(outputs, min=-10, max=10)
            return rewards

        except Exception as e:
            # Best-effort fallback: zero rewards keep the training loop alive.
            print(f"奖励计算出错: {str(e)}")
            return torch.zeros(len(text), 1).to(device)

# Instantiate the reward model and move it to GPU.
reward_model = CodeRewardWrapper().cuda()

# PPO hyper-parameters. batch_size must match the DataLoader batch size above.
ppo_config = PPOConfig(
    batch_size=8,
    mini_batch_size=4,      # rollout batch is split into 8/4 = 2 optimisation mini-batches
    ppo_epochs=2,           # optimisation passes per rollout batch
    learning_rate=1.5e-5,
    init_kl_coef=0.15,      # initial KL-penalty coefficient
    adap_kl_ctrl=True,      # adapt the KL coefficient toward target_kl
    target_kl=0.8,          # KL-divergence target for the adaptive controller
    cliprange=0.2,          # PPO policy clip range
    cliprange_value=0.2,    # value-function clip range
    vf_coef=0.1             # value-loss weight in the combined objective
)

# Initialise the PPO trainer around the value-head policy model.
# NOTE(review): a dataset is handed to PPOTrainer here while train() below
# also iterates its own DataLoaders — confirm only one data path is intended.
ppo_trainer = PPOTrainer(
    model=model,
    config=ppo_config,
    tokenizer=tokenizer,
    dataset=ppo_dataset  # TRL can build an internal dataloader from this
)

# Sampling settings for response generation.
generation_kwargs = {
    "min_length": 32,
    # BUGFIX: top_k must be an integer count of candidate tokens; the
    # original 0.6 (a float) fails transformers' generation validation.
    # 0 disables top-k so nucleus (top_p) sampling governs the candidate set.
    "top_k": 0,
    "top_p": 0.95,               # nucleus sampling threshold
    "do_sample": True,
    "pad_token_id": tokenizer.eos_token_id,  # Llama-2 has no dedicated pad token
    "max_new_tokens": 256,
    "temperature": 0.8,
    "repetition_penalty": 1.1
}

# Logging: mirror INFO-level messages to a file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('train_rl.log'),  # persistent run log
        logging.StreamHandler()               # console mirror
    ]
)

# Early-stopping helper
class EarlyStopping:
    """Stop training when a monitored loss stops improving.

    Tracks the best (lowest) value seen so far; every call that fails to
    improve on it by more than ``min_delta`` advances a counter. Once the
    counter reaches ``patience``, ``early_stop`` flips to True (and stays
    True even if the metric later improves).
    """

    def __init__(self, patience=3, min_delta=0):
        self.patience = patience
        self.min_delta = min_delta
        self.counter = 0
        self.best_loss = None
        self.early_stop = False

    def __call__(self, val_loss):
        # First observation just seeds the baseline.
        if self.best_loss is None:
            self.best_loss = val_loss
            return

        improved = val_loss <= self.best_loss - self.min_delta
        if improved:
            # Reset patience on a genuine improvement.
            self.best_loss = val_loss
            self.counter = 0
            return

        # No improvement: burn one unit of patience.
        self.counter += 1
        if self.counter >= self.patience:
            self.early_stop = True

# Training loop
def train():
    """Run PPO training with per-epoch validation, checkpointing, early stop.

    Side effects: updates the global `model` in place, writes checkpoints
    under ./checkpoints, and logs progress via the module logging config.
    """
    early_stopping = EarlyStopping(patience=3)
    best_reward = float('-inf')
    num_epochs = 3

    for epoch in range(num_epochs):
        model.train()
        epoch_rewards = []  # per-batch normalised rewards (epoch statistics)

        # Training loop over prompt batches
        for batch_idx, queries in enumerate(tqdm(train_dataloader, desc=f"Epoch {epoch+1}")):
            try:
                # Generate responses for this batch of prompt strings.
                inputs = tokenizer(queries, padding=True, return_tensors="pt").to("cuda")
                response_tensors = ppo_trainer.generate(
                    inputs.input_ids,
                    attention_mask=inputs.attention_mask,
                    **generation_kwargs
                )

                # Score the decoded responses with the reward model.
                responses = [tokenizer.decode(r, skip_special_tokens=True) for r in response_tensors]
                rewards = reward_model(responses)

                # Normalise rewards within the batch to stabilise PPO.
                rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
                epoch_rewards.extend(rewards.tolist())

                # BUGFIX: PPOTrainer.step expects List[Tensor] for queries,
                # responses and scores — the original passed raw strings and
                # a batched (B, 1) tensor, which TRL rejects.
                query_tensors = list(inputs.input_ids)
                response_list = list(response_tensors)
                score_list = [r.squeeze() for r in rewards]
                stats = ppo_trainer.step(query_tensors, response_list, score_list)

                # NOTE: the original called clip_grad_norm_ here, *after* the
                # optimiser step inside ppo_trainer.step() — a no-op. Gradient
                # clipping belongs in the PPO config; removed.

                # Periodic progress logging
                if batch_idx % 10 == 0:
                    logging.info(
                        f"Epoch {epoch+1}/{num_epochs} | Batch {batch_idx+1}/{len(train_dataloader)}\n"
                        f"Avg Reward: {rewards.mean().item():.3f} | KL Div: {stats['objective/kl']:.3f}\n"
                        f"Policy Loss: {stats['ppo/policy/mean']:.3f} | Value Loss: {stats['ppo/val/mean']:.3f}"
                    )

            except Exception as e:
                # Best-effort: skip the failing batch but keep training.
                logging.error(f"训练出错: {str(e)}")
                continue

        # Validation: average (un-normalised) reward on the held-out split.
        model.eval()
        val_rewards = []
        with torch.no_grad():
            for val_queries in tqdm(val_dataloader, desc="Validation"):
                val_inputs = tokenizer(val_queries, padding=True, return_tensors="pt").to("cuda")
                val_responses = ppo_trainer.generate(
                    val_inputs.input_ids,
                    attention_mask=val_inputs.attention_mask,
                    **generation_kwargs
                )
                val_decoded = [tokenizer.decode(r, skip_special_tokens=True) for r in val_responses]
                val_rewards.append(reward_model(val_decoded).mean().item())

        # Guard against an empty validation loader (original divided by zero).
        avg_val_reward = sum(val_rewards) / max(len(val_rewards), 1)
        logging.info(f"Validation Average Reward: {avg_val_reward:.3f}")

        # Checkpoint on validation improvement.
        if avg_val_reward > best_reward:
            best_reward = avg_val_reward
            model.save_pretrained(os.path.join("checkpoints", f"best_model_epoch_{epoch+1}"))
            logging.info(f"保存最佳模型 checkpoint/best_model_epoch_{epoch+1}")

        # BUGFIX: EarlyStopping treats its input as a loss (lower is better);
        # the original passed the raw reward, so every *improvement* advanced
        # the patience counter. Negate the reward to restore the intent.
        early_stopping(-avg_val_reward)
        if early_stopping.early_stop:
            logging.info("触发早停机制，停止训练")
            break

def print_trainable_parameters(model):
    """Print the trainable parameter count and its share of all parameters."""
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print(
        f"可训练参数: {trainable:,d} ({100 * trainable / total:.2f}%)\n"
        f"所有参数: {total:,d}"
    )

if __name__ == "__main__":
    try:
        train()
    except KeyboardInterrupt:
        # Manual interrupt: persist the current weights before exiting.
        logging.info("手动中断训练")
        # Save the final model state
        model.save_pretrained("checkpoints/interrupted_model")
    except Exception as e:
        # Top-level catch-all so the failure reason lands in train_rl.log.
        logging.error(f"训练异常: {str(e)}")