import torch
from peft import LoraConfig, TaskType
from modelscope import AutoTokenizer, BitsAndBytesConfig, AutoModelForCausalLM
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
from datasets import Dataset
import json
import config

# Select the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model_path = config.model_name
tokenizer = AutoTokenizer.from_pretrained(model_path)
tokenizer.padding_side = "right"
# The base model has no dedicated pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

# 4-bit NF4 quantization with double quantization to cut GPU memory use;
# computation still runs in fp16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
base_model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=bnb_config,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
)

# Low-rank adapters on the attention projections; the value head and the
# pre-trained reward adapter are attached by the TRL wrapper below.
peft_config = LoraConfig(
    r=2,
    target_modules=["q_proj", "k_proj", "v_proj"],
    task_type=TaskType.CAUSAL_LM,
    lora_alpha=16,
    lora_dropout=0.05,
)
# BUG FIX: dropped the stray `aa="bb"` keyword argument — it is not part of
# the from_pretrained() signature and is meaningless junk that can raise a
# TypeError (or be silently mis-forwarded) at load time.
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    base_model,
    peft_config=peft_config,
    reward_adapter="model/reward_model",
).to(device)

# Load the queries file (JSON-Lines: one JSON object per line) and wrap the
# records in a Hugging Face Dataset.
with open("custom_data/queries.json", "r", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]
queries_dataset = Dataset.from_list(records)

def collator(data):
    """Tokenize a batch of dataset rows into a list of query tensors.

    Args:
        data: batch of dataset rows, each a dict containing a "query" string.

    Returns:
        list[torch.Tensor]: one unpadded 1-D input-id tensor per query,
        moved to the module-level ``device``. TRL's PPO rollout accepts
        variable-length query tensors, so no padding is applied here.
    """
    queries = []
    for item in data:
        ids = tokenizer(item["query"], return_tensors="pt")["input_ids"]
        # BUG FIX: use the shared `device` (with CPU fallback) instead of a
        # hard-coded "cuda", so the script also runs on CPU-only machines
        # and stays consistent with where the model was placed.
        queries.append(ids.squeeze().to(device))
    return queries

# PPO hyper-parameters: batches of 16 queries, 3 inner optimization epochs
# per batch, and a small KL penalty against the reference policy.
ppo_config = PPOConfig(batch_size=16, num_ppo_epochs=3, kl_coef=0.05)
ppo_trainer = PPOTrainer(
    args=ppo_config,
    model=model,
    ref_model=None,  # TRL builds the frozen reference policy internally
    processing_class=tokenizer,
    train_dataset=queries_dataset,
    data_collator=collator,
)

# Sampling settings for the PPO rollout phase: plain sampling with both
# top-k and nucleus filtering disabled, capped at 32 new tokens.
generation_kwargs = dict(
    min_length=-1,  # do not force a minimum generation length
    top_k=0.0,      # 0 disables top-k filtering
    top_p=1.0,      # 1.0 disables nucleus (top-p) filtering
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,  # pad with EOS (no dedicated pad token)
    max_new_tokens=32,
)

# PPO training loop: for each batch, roll out responses, score each
# (query, response) pair with the reward adapter, then take one PPO step.
for batch in ppo_trainer.dataloader:
    query_tensors = batch
    response_tensors = ppo_trainer.generate(query_tensors, **generation_kwargs)
    scores = []
    for query, response in zip(query_tensors, response_tensors):
        # The reward model scores the full concatenated sequence.
        input_ids = torch.concat([query, response], dim=0)
        input_ids = torch.unsqueeze(input_ids, 0)  # add batch dimension
        score = ppo_trainer.model.compute_reward_score(input_ids)
        # BUG FIX: the original read `scores[0, -1, 0]`, indexing the (still
        # empty) Python list with a tuple — a TypeError on the first batch.
        # Index the returned tensor instead: batch 0, last token, value 0.
        score = score[0, -1, 0]
        scores.append(score)
    stats = ppo_trainer.step(query_tensors, response_tensors, scores)

# Persist the PPO-tuned policy (LoRA adapters + value head).
ppo_trainer.save_pretrained("model/rl_model")