import re
from typing import Optional

import dotenv
from datasets import Dataset, load_dataset
from trl import GRPOConfig, GRPOTrainer
from unsloth import FastLanguageModel
from vllm import SamplingParams

dotenv.load_dotenv()


# Sequence-length budget. GRPOConfig below sets
#   max_completion_length = max_seq_length - max_prompt_length,
# so the prompt budget must be strictly smaller than the total budget.
# The original 4096/4096 split left 0 tokens for completions, which would
# make GRPO train on empty generations.
max_prompt_length = 3072
max_seq_length = 4096  # Can increase for longer reasoning traces
lora_rank = 32  # Larger rank = smarter, but slower

# Load the base model (4-bit) with unsloth's vLLM-backed fast inference.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="Qwen/Qwen2.5-7B-Instruct",
    max_seq_length=max_seq_length,
    load_in_4bit=True,  # False for LoRA 16bit
    fast_inference=True,  # Enable vLLM fast inference
    max_lora_rank=lora_rank,
    gpu_memory_utilization=0.6,  # Reduce if out of memory
)

# Wrap the base model with LoRA adapters so only adapter weights are trained.
model = FastLanguageModel.get_peft_model(
    model,
    r=lora_rank,  # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],  # Remove QKVO if out of memory
    lora_alpha=lora_rank,  # alpha set equal to rank
    use_gradient_checkpointing="unsloth",  # Enable long context finetuning
    random_state=3407,
)


def get_questions(split="train") -> Dataset:
    """Load the tari-odt-regression split and wrap each prompt as a one-turn chat.

    Each example becomes ``{"prompt": [{"role": "user", "content": ...}],
    "answer": ...}``, the shape GRPOTrainer expects for conversational prompts.
    """
    raw = load_dataset("BrightXiaoHan/tari-odt-regression")[split]  # type: ignore

    def to_chat(example):  # type: ignore
        return {
            "prompt": [
                {"role": "user", "content": example["prompt"]},
            ],
            "answer": example["answer"],
        }

    return raw.map(to_chat)  # type: ignore


dataset = get_questions()


def extract_answer(text) -> float:
    r"""
    Extract the predicted disintegration time from model output.

    Looks for the value inside \boxed{} first; if there is no parseable boxed
    value, falls back to the last number appearing anywhere in the text (the
    behavior the original docstring promised but never implemented).

    Returns:
        The extracted value, or -1.0 when no number can be found.
    """
    match = re.search(r"\\boxed\{([0-9.]+)\}", text)
    if match:
        try:
            return float(match.group(1))
        except ValueError:
            # e.g. "\boxed{1.2.3}" — not a valid float; fall through to the
            # generic last-number scan below.
            pass
    numbers = re.findall(r"[-+]?\d*\.?\d+", text)
    if numbers:
        return float(numbers[-1])
    return -1.0


# Reward functions
def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:
    """Reward 2.0 when the predicted value is within 10% of the ground truth.

    Args:
        prompts: chat-format prompts; unused here but required by the
            GRPOTrainer reward-function signature.
        completions: list of generations, each ``[{"content": str, ...}]``.
        answer: ground-truth values aligned with ``completions``.

    Returns:
        One reward per completion: 2.0 on a hit, 0.0 otherwise.
    """
    responses = [completion[0]["content"] for completion in completions]
    predictions = [extract_answer(r) for r in responses]
    rewards: list[float] = []

    for truth, pred in zip(answer, predictions):
        print(f"Prediction: {pred}, Ground Truth: {truth}")
        if truth == 0:
            # Relative error is undefined at 0 (the original code would raise
            # ZeroDivisionError); require an exact match instead.
            rewards.append(2.0 if pred == 0 else 0.0)
            continue
        error_percentage = abs(pred - truth) / abs(truth) * 100
        rewards.append(2.0 if error_percentage <= 10.0 else 0.0)

    return rewards


def strict_format_reward_func(completions, **kwargs) -> list[float]:
    """Reward 0.5 for each completion containing a \\boxed{<number>} expression."""
    boxed = re.compile(r"\\boxed\{[0-9.]+\}")
    rewards = []
    for completion in completions:
        text = completion[0]["content"]
        rewards.append(0.5 if boxed.search(text) else 0.0)
    return rewards


# GRPO training hyperparameters.
training_args = GRPOConfig(
    learning_rate=5e-6,
    adam_beta1=0.9,
    adam_beta2=0.99,
    weight_decay=0.1,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    optim="paged_adamw_8bit",
    logging_steps=1,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=1,  # Increase to 4 for smoother training
    num_generations=6,  # Decrease if out of memory
    max_prompt_length=max_prompt_length,
    # NOTE(review): this is the leftover token budget after the prompt —
    # confirm the constants at the top of the file leave it positive
    # (equal prompt/sequence budgets would make it 0).
    max_completion_length=max_seq_length - max_prompt_length,
    # num_train_epochs = 1, # Set to 1 for a full training run
    max_steps=250,
    save_steps=250,
    max_grad_norm=0.1,
    report_to="none",  # Can use Weights & Biases
    output_dir="outputs",
)


# Train with two stacked rewards: format compliance (max 0.5) and
# numeric correctness (max 2.0).
trainer = GRPOTrainer(
    model=model,
    processing_class=tokenizer,
    reward_funcs=[
        strict_format_reward_func,
        correctness_reward_func,
    ],
    args=training_args,
    train_dataset=dataset,
)
trainer.train()


# Render the test-split chat prompts to raw strings for direct vLLM generation.
test_set = get_questions(split="test")
texts = [
    tokenizer.apply_chat_template(
        example["prompt"],
        tokenize=False,
        add_generation_prompt=True,  # append the assistant-turn header
    )
    for example in test_set
]


# Near-greedy decoding for evaluation (low temperature, nucleus sampling).
sampling_params = SamplingParams(
    temperature=0.1,
    top_p=0.95,
    max_tokens=1024,
)

# NOTE(review): lora_request=None looks like it evaluates without the trained
# LoRA adapter — confirm whether the finetuned weights should be passed here.
outputs = model.fast_generate(
    texts,
    sampling_params=sampling_params,
    lora_request=None,
)

# Score the generations: a hit is a prediction within 10% relative error.
correct = 0
total = 0
for output, answer in zip(outputs, test_set["answer"]):
    # NOTE(review): vLLM's RequestOutput usually exposes generated text at
    # output.outputs[0].text — confirm fast_generate returns objects with a
    # flat .text attribute as used here.
    prediction = extract_answer(output.text)  # parse once, not twice per item
    print(answer)
    print(prediction)

    if answer == 0:
        # Relative error is undefined at 0; require an exact match.
        hit = prediction == 0
    else:
        hit = abs(prediction - answer) / abs(answer) * 100 <= 10.0
    if hit:
        correct += 1
    total += 1

# Guard against an empty test split (ZeroDivisionError in the original).
print(f"Accuracy: {correct / total if total else 0.0}")