import torch
from peft import LoraConfig, TaskType, get_peft_model
from modelscope import AutoTokenizer, BitsAndBytesConfig, AutoModelForCausalLM
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
from datasets import Dataset
import json
import config

# Device for tokenized batches; the quantized model itself is placed via
# `device_map` at load time below.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Tokenizer setup: right padding, and reuse EOS as the pad token — a common
# default for causal LMs that ship without a dedicated pad token.
model_path = config.model_name
tokenizer = AutoTokenizer.from_pretrained(model_path)
tokenizer.padding_side = "right"
tokenizer.pad_token = tokenizer.eos_token

# 4-bit NF4 quantization with nested (double) quantization; fp16 compute dtype.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16
)

# Load the base model. NOTE: a bitsandbytes-quantized model must be placed
# with `device_map` at load time — calling .to(device) on it raises
# "`.to` is not supported for `4-bit` or `8-bit` bitsandbytes models".
base_model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=bnb_config,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    device_map={"": 0} if torch.cuda.is_available() else None,
)

# LoRA configuration. r=16 is a conventional rank (the original r=2 was
# flagged in-line as too small for lora_alpha=128). target_modules must match
# the model's actual attention projection names: "q_proj"/"k_proj" is the
# standard LLaMA-style pair — the previous "q_attn" belongs to a different
# architecture family and mixing the two names would fail to match.
peft_config = LoraConfig(
    r=16,
    lora_alpha=128,
    lora_dropout=0.1,
    target_modules=["q_proj", "k_proj"],  # adjust to the actual model's module names
    task_type=TaskType.CAUSAL_LM
)

# Apply LoRA and add a value head for PPO. The base model is already
# quantized and placed on-device via device_map, so no .to(device) here —
# it is unsupported for 4-bit/8-bit bitsandbytes models.
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    base_model,
    peft_config=peft_config
)

# Data loading: the queries file is JSON-Lines (one JSON object per line).
# Skip blank lines (e.g. a trailing newline) — json.loads("") would raise
# JSONDecodeError.
with open("custom_data/queries.json", "r", encoding="utf-8") as f:
    items = [json.loads(line) for line in f if line.strip()]

queries_dataset = Dataset.from_list(items)


def collator(data):
    """Tokenize each example's "query" field into one padded batch on `device`.

    `data` is a list of dataset rows; returns the tokenizer's BatchEncoding
    (input_ids, attention_mask, ...) as PyTorch tensors moved to `device`.
    """
    texts = [example["query"] for example in data]
    batch = tokenizer(
        texts,
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    return batch.to(device)


# PPO configuration. This script uses the legacy TRL API (see the
# ppo_trainer.generate()/step()/dataloader usage below, which the newer
# PPOTrainer does not expose), so the legacy kwarg names apply:
# `ppo_epochs` (not `num_ppo_epochs`) and `config=` (not `args=`).
ppo_config = PPOConfig(
    batch_size=16,
    ppo_epochs=3,           # optimisation epochs per PPO batch
    learning_rate=1.41e-5,
    adap_kl_ctrl=True       # adaptive KL-penalty controller
)

# Initialize the PPOTrainer (legacy signature).
ppo_trainer = PPOTrainer(
    config=ppo_config,
    model=model,
    ref_model=None,  # with LoRA, TRL derives the reference policy by disabling adapters
    tokenizer=tokenizer,
    dataset=queries_dataset,
    data_collator=collator
)

# Generation parameters for rollout sampling.
# top_k=0 together with top_p=1.0 means pure sampling from the full
# distribution — and top_k must be an int (0.0 fails generate() validation).
generation_kwargs = {
    "min_length": -1,          # no forced minimum; EOS may end generation
    "top_k": 0,                # int, 0 disables top-k filtering
    "top_p": 1.0,
    "do_sample": True,
    "pad_token_id": tokenizer.eos_token_id,
    "max_new_tokens": 32
}

# Training loop: roll out responses, score them, and take one PPO step per batch.
for batch in ppo_trainer.dataloader:
    # Legacy PPOTrainer.step expects per-sample 1-D tensors, not a padded
    # batch tensor — split the collated batch into a list.
    query_tensors = [ids for ids in batch["input_ids"]]

    # Generate one response per query; return_prompt=False keeps only the
    # newly generated tokens (the queries are passed to step() separately).
    response_tensors = ppo_trainer.generate(
        query_tensors,
        return_prompt=False,
        **generation_kwargs
    )

    # Compute rewards — placeholder random scores; replace with a real
    # reward model. step() expects a list of scalar tensors, one per sample.
    with torch.no_grad():
        rewards = [torch.randn(()) for _ in query_tensors]

    # PPO update
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)

# Persist the fine-tuned policy (LoRA adapters + value head).
ppo_trainer.save_pretrained("model/rl_model")