import math
import os

import torch
from datasets import load_dataset
from tqdm import tqdm
from transformers import (
    Trainer,
    TrainingArguments,
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
)

# Force offline mode so transformers never tries to reach the Hugging Face Hub.
os.environ["TRANSFORMERS_OFFLINE"] = "1"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 1. Select the pretrained model and tokenizer (loaded from a local checkout).
model_name = "Qwen-0.6B"
# model_name = "gpt2"
local_model = f"../models/{model_name}"
tokenizer = AutoTokenizer.from_pretrained(local_model, local_files_only=True)
# GPT-2 has no pad token, so add one manually when missing.
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({"pad_token": "<|pad|>"})

model = AutoModelForCausalLM.from_pretrained(local_model, local_files_only=True)
model.to(device)
# Grow the embedding matrix to match the (possibly extended) tokenizer vocab;
# required whenever a new special token was added above.
model.resize_token_embeddings(len(tokenizer))

# 2. Load and preprocess the JSONL dataset (ARC-Easy splits).
# NOTE(review): the variable name `datasets` below is easy to confuse with the
# `datasets` library (only `load_dataset` was imported, so there is no clash).
dataset = "ARC-e"
data_files = {"train": f"./data/ARC/{dataset}/ARC-Dev.jsonl",
              "validation": f"./data/ARC/{dataset}/ARC-Test.jsonl"}
datasets = load_dataset("json", data_files=data_files)


# Formatting function: build the question prompt and append the answer as the
# learning target.
def format_example(example):
    """Render one ARC example into a single training string.

    Args:
        example: Dict with keys "question" (containing "stem" and "choices",
            each choice a dict with "label" and "text") and "answerKey".

    Returns:
        Dict with a single "text" key: the question header, one line per
        option ("label: text"), then the "Answer: <key>" line.
    """
    question = example['question']
    options = [f"{choice['label']}: {choice['text']}" for choice in question['choices']]
    # BUG FIX: the original omitted the "+" after the first f-string, so
    # implicit adjacent-literal concatenation turned "Question: <stem>\n\n"
    # into the str.join *separator*, scattering the stem between options.
    text = (f"Question: {question['stem']}\n"
            + '\n'.join(options)
            + f"\nAnswer: {example['answerKey']}\n")
    return {"text": text}


# Apply the prompt formatting to every split (adds a "text" column per example).
formatted = datasets.map(format_example)

# Tokenization settings: fixed sequence length used for both padding and truncation.
max_length = 256


def tokenize_function(examples):
    """Tokenize a batch of formatted texts into fixed-length model inputs.

    Pads/truncates every sequence to the module-level `max_length` and copies
    the input ids as labels for causal-LM training (the model handles the
    one-token shift internally, per the transformers convention).
    """
    encoded = tokenizer(
        examples["text"],
        padding="max_length",
        max_length=max_length,
        truncation=True,
        return_attention_mask=True,
    )
    encoded["labels"] = list(encoded["input_ids"])
    return encoded


# Tokenize all splits, dropping the raw columns so only model inputs remain.
tokenized_datasets = formatted.map(
    tokenize_function,
    batched=True,
    remove_columns=["question", "id", "answerKey", "text"],
)

# 3. Data collator: mlm=False selects plain causal-LM batching
#    (pad positions in the labels are masked out by the collator).
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False,
)

# 4. Configure training hyperparameters.
output_dir = f"./outputs/{model_name}"
os.makedirs(output_dir, exist_ok=True)
training_args = TrainingArguments(
    output_dir=output_dir,
    overwrite_output_dir=True,
    per_device_train_batch_size=10,
    per_device_eval_batch_size=10,
    gradient_accumulation_steps=8,  # effective batch = 10 * 8 per device
    # evaluation_strategy="steps",
    # NOTE(review): with evaluation_strategy left at its default, eval_steps
    # above has no effect during training — confirm this is intended.
    eval_steps=200,
    logging_steps=100,
    # NOTE(review): a float save_steps is treated as a ratio of total steps in
    # recent transformers versions — verify the installed version supports it.
    save_steps=0.5,
    num_train_epochs=5,
    learning_rate=1e-4,
    warmup_steps=200,
    weight_decay=0.01,
    fp16=True,  # mixed precision; requires a CUDA device
    push_to_hub=False,
)

# 5. Initialize the Trainer with the tokenized splits and collator.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator
)


# Optional: inference helper.
@torch.no_grad()
def generate(prompt, max_length=2, log=False):
    """Greedy-decode an answer letter for a formatted multiple-choice prompt.

    Args:
        prompt: Question text including the option lines, ending with a newline.
        max_length: Number of new tokens to generate beyond the prompt.
        log: If True, print the full decoded output for inspection.

    Returns:
        The first non-whitespace character generated after "Answer: "
        (expected to be the option label), or "" if nothing was generated.
    """
    model.eval()
    text = prompt + "Answer: "
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_length=max_length + len(inputs["input_ids"][0]),
        num_beams=1,
        no_repeat_ngram_size=2,
        early_stopping=True,  # no-op with num_beams=1; kept for beam-search parity
    )
    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
    if log:
        print()
        print(generated)
    # Slicing off len(text) assumes decode() reproduces the prompt verbatim —
    # TODO(review): confirm this round-trips for the chosen tokenizer.
    # BUG FIX: use [:1] instead of [0] so an empty generation returns ""
    # rather than raising IndexError.
    return generated[len(text):].strip()[:1]


@torch.no_grad()
def evaluate_on_test_set(test_dataset, test_len=-1, log=False):
    """Measure answer-letter accuracy of generate() over a test split.

    Args:
        test_dataset: Sequence of ARC examples ("question", "answerKey" keys).
        test_len: If > 0, stop after this many examples; otherwise use all.
        log: Forwarded to generate(); also prints the gold answer per example.

    Returns:
        Accuracy in [0, 1]; 0.0 for an empty dataset.
    """
    correct = 0
    total = 0
    # Progress-bar total, clipped to the requested number of examples.
    limit = min(test_len, len(test_dataset)) if test_len > 0 else len(test_dataset)

    for example in tqdm(test_dataset, total=limit):
        question = example['question']
        options = [f"{choice['label']}: {choice['text']}" for choice in question['choices']]
        # BUG FIX: the original omitted "+" after the f-string, so implicit
        # literal concatenation made the question header the join separator
        # between options instead of a prefix.
        prompt = (f"Question: {question['stem']}\n"
                  + '\n'.join(options) + "\n")
        prediction = generate(prompt, log=log).strip()
        if log:
            print(f"True Answer: {example['answerKey']}")

        # The model may emit just the option letter; compare after stripping.
        if prediction == example['answerKey']:
            correct += 1
        total += 1
        if total == test_len:
            break

    # Guard against ZeroDivisionError on an empty dataset.
    accuracy = correct / total if total else 0.0
    print(f"Test Accuracy: {accuracy:.2%} ({correct}/{total})")
    return accuracy


if __name__ == "__main__":
    # 6. Evaluate performance before fine-tuning.
    print("Evaluating before fine-tuning...")
    baseline_metrics = trainer.evaluate()
    baseline_loss = baseline_metrics["eval_loss"]
    # Perplexity is exp(mean cross-entropy loss).
    baseline_ppl = math.exp(baseline_loss)
    print(f"Baseline Loss: {baseline_loss:.4f}, Perplexity: {baseline_ppl:.2f}")

    print("Evaluating test set accuracy before fine-tuning...")
    # Fixed ARC-style sanity-check prompt; the gold answer is A.
    example = (f"Question: Why does sexual reproduction result in more genetic diversity than asexual reproduction?\n"
               f"A: Traits from two parents are combined.\n"
               f"B: More organisms reproduce this way.\n"
               f"C: Offspring grow in different environments.\n"
               f"D: Offspring come from identical parents.\n")
    generate(example, log=True)
    print("True Answer: A")
    evaluate_on_test_set(datasets["validation"], log=False)

    # 7. Run fine-tuning.
    trainer.train()
    # Release cached GPU memory before the post-training evaluation passes.
    torch.cuda.empty_cache()

    # 8. Evaluate performance after fine-tuning.
    print("Evaluating after fine-tuning...")
    finetune_metrics = trainer.evaluate()
    ft_loss = finetune_metrics["eval_loss"]
    ft_ppl = math.exp(ft_loss)
    print(f"Fine-tuned Loss: {ft_loss:.4f}, Perplexity: {ft_ppl:.2f}")
    print("Evaluating test set accuracy after fine-tuning...")
    evaluate_on_test_set(datasets["validation"], log=False)

    # Quick qualitative check on the sanity prompt after training.
    generate(example, log=True)
    print("True Answer: A")
