import logging

import torch
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, DataCollatorForLanguageModeling, \
    Trainer, DataCollatorForSeq2Seq
from datasets import load_dataset

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Local path to the base checkpoint; adjust to wherever the model lives.
# NOTE(review): the official Hugging Face id may be Qwen/Qwen3-0.6B (or the
# 0.5B variant) — confirm against the checkpoint actually on disk.
model_name_or_path = "/hy-tmp/models/Qwen-0.6B/"

# Load tokenizer from local files only (no hub download).
# NOTE(review): some Qwen releases require trust_remote_code=True — confirm
# for this exact checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, local_files_only=True)

# Load the base causal LM. If GPU memory is tight, consider load_in_8bit /
# load_in_4bit (bitsandbytes) or device_map="auto".
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    local_files_only=True
    # device_map="auto" # automatically place the model on available devices
)

# Qwen tokenizers may ship without a pad token: fall back to EOS, or as a
# last resort add a brand-new [PAD] token and grow the embedding matrix.
if tokenizer.pad_token is None:
    if tokenizer.eos_token is not None:
        tokenizer.pad_token = tokenizer.eos_token
        logger.info("tokenizer.pad_token was None, set to eos_token: %s", tokenizer.eos_token)
    else:
        # Unlikely for Qwen3, which defines an EOS token.
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        model.resize_token_embeddings(len(tokenizer))  # embeddings must grow for the new token
        logger.info("tokenizer.pad_token was None, added new pad_token: %s", tokenizer.pad_token)

# The data collator needs a pad_token_id; fail loudly if it is still missing.
# (A bare `assert` would be stripped under `python -O`.)
if tokenizer.pad_token_id is None:
    raise ValueError("Tokenizer must have a pad_token_id")

# LoRA adapter configuration: low-rank updates applied to the attention and
# MLP projection layers of the Qwen architecture. Module names below match
# the Qwen/Llama-style layer naming; verify against the loaded model if a
# different checkpoint is used.
QWEN_LORA_TARGET_MODULES = [
    "q_proj",
    "k_proj",
    "v_proj",
    "o_proj",
    "gate_proj",
    "up_proj",
    "down_proj",
]

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,        # causal language modeling
    r=8,                                 # rank of the low-rank decomposition
    lora_alpha=16,                       # LoRA scaling factor
    lora_dropout=0.05,
    bias="none",
    target_modules=QWEN_LORA_TARGET_MODULES,
)

# Wrap the already-loaded base model with the PEFT adapter; only the LoRA
# weights remain trainable.
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # reports the (small) trainable fraction

# Load the MathBench college split from local JSONL files.
split_files = {
    "train": "mathbench_v1/college/train.jsonl",
    "validation": "mathbench_v1/college/eval.jsonl",
}
raw_datasets = load_dataset("json", data_files=split_files)

# Preprocessing: convert each raw sample into input_ids, attention_mask and
# labels. For SFT the prompt and the answer are concatenated into one
# sequence, and the loss is restricted to the answer tokens by masking the
# prompt positions of `labels` with -100. Adjust the function below if your
# data schema differs. (Tokenizer/pad-token setup — EOS fallback, optional
# new [PAD] token plus embedding resize — has already been handled above.)
max_seq_length = 1024  # questions that include options may need a longer sequence

def preprocess_function_mcq(examples):
    """Convert batched multiple-choice samples into SFT training features.

    For each sample a Qwen chat conversation (system / user / assistant) is
    rendered with ``tokenizer.apply_chat_template`` and tokenized. Labels are
    a copy of ``input_ids`` with every prompt-prefix position set to -100 so
    the loss is computed only on the assistant answer.

    Args:
        examples: batched dataset columns. Expects "question" (str),
            "options" (list of option texts), "answer" (option letter such as
            "C"), and optionally "topic".

    Returns:
        dict with parallel lists "input_ids", "attention_mask" and "labels",
        one entry per sample that survived validation/truncation checks
        (malformed or fully-truncated samples are skipped).
    """
    model_inputs = {
        "input_ids": [],
        "attention_mask": [],
        "labels": [],
    }

    for i in range(len(examples["question"])):
        question_text = examples["question"][i]
        options_list = examples["options"][i]
        correct_answer_letter = examples["answer"][i].upper()  # normalize, e.g. "c" -> "C"
        topic = examples["topic"][i] if "topic" in examples else "general knowledge"

        # 1. Render the options as "A) ...", one per line.
        formatted_options = []
        for idx, opt_text in enumerate(options_list):
            option_letter = chr(ord('A') + idx)
            formatted_options.append(f"{option_letter}) {opt_text}")
        options_str = "\n".join(formatted_options)

        # 2. Resolve the answer letter to its option text; skip malformed rows.
        correct_option_text = ""
        try:
            correct_option_idx = ord(correct_answer_letter) - ord('A')
            if 0 <= correct_option_idx < len(options_list):
                correct_option_text = options_list[correct_option_idx]
            else:
                logger.warning(f"Invalid answer letter '{correct_answer_letter}' for question with {len(options_list)} options. Skipping sample: {question_text[:50]}...")
                continue
        except TypeError:  # correct_answer_letter was not a string
            logger.warning(f"Answer letter is not a string: {correct_answer_letter}. Skipping sample: {question_text[:50]}...")
            continue

        # 3. Build the chat messages. The system prompt frames the task; the
        #    assistant turn holds the target answer (letter + option text,
        #    which usually trains better than the bare letter).
        system_content = f"You are an expert AI assistant specializing in {topic}. Please analyze the following multiple-choice question and select the correct answer."

        user_content = f"Question: {question_text}\n\nOptions:\n{options_str}\n\nWhich of the following is the correct answer? Please provide the letter of the correct option and its content."

        assistant_content = f"The correct answer is {correct_answer_letter}: {correct_option_text}"

        messages = [
            {"role": "system", "content": system_content},
            {"role": "user", "content": user_content},
            {"role": "assistant", "content": assistant_content}
        ]

        # 4. Render the full conversation. add_generation_prompt=False because
        #    the assistant answer is already included.
        try:
            full_formatted_text = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=False
            )
        except Exception as e:
            logger.error(f"Error applying chat template for question: {question_text[:50]}... Error: {e}")
            continue

        # apply_chat_template output already contains all special tokens, so
        # re-tokenizing must use add_special_tokens=False. (Previously the
        # full text used the default True while the prompt-only text below
        # used False: a tokenizer that inserts BOS would double-add it and
        # shift prompt_length, misaligning the -100 label mask.)
        tokenized_chat = tokenizer(
            full_formatted_text,
            max_length=max_seq_length,
            truncation=True,
            padding=False,  # the DataCollator pads per batch
            add_special_tokens=False,
        )

        # 5. Build labels: copy input_ids, then mask the prompt prefix with -100
        #    so loss is only computed on the assistant answer.
        current_labels = list(tokenized_chat["input_ids"])

        # Re-render only system+user with the assistant generation prompt
        # (e.g. "<|im_start|>assistant\n") to measure how many leading tokens
        # belong to the prompt. This works because that rendering is a strict
        # prefix of the full conversation rendering for Qwen chat templates.
        prompt_messages = messages[:-1]  # system and user turns
        try:
            prompt_only_text = tokenizer.apply_chat_template(
                prompt_messages,
                tokenize=False,
                add_generation_prompt=True  # appends the assistant-start marker
            )
        except Exception as e:
            logger.error(f"Error applying chat template for prompt_only for question: {question_text[:50]}... Error: {e}")
            continue

        tokenized_prompt_only = tokenizer(
            prompt_only_text,
            max_length=max_seq_length,  # guard against an over-long prompt
            truncation=True,
            add_special_tokens=False  # template output already has special tokens
        )
        prompt_length = len(tokenized_prompt_only["input_ids"])

        # Mask the prompt tokens and remember whether any trainable
        # (non-pad, non-masked) label survives.
        valid_sample = False
        for k in range(len(current_labels)):
            if k < prompt_length:
                current_labels[k] = -100
            elif current_labels[k] != tokenizer.pad_token_id:
                valid_sample = True

        if not valid_sample and len(current_labels) > 0:
            # Every label is -100 or pad: the answer was truncated away, or
            # the prompt_length calculation disagrees with the full rendering.
            if prompt_length >= len(tokenized_chat["input_ids"]):
                logger.warning(f"Prompt too long, answer completely truncated or only padding left. Skipping sample: {question_text[:50]}...")
                continue
            else:
                logger.warning(f"Sample results in all labels being -100 or pad_token_id. Check prompt_length calculation and truncation. Skipping sample: {question_text[:50]}...")
                continue

        model_inputs["input_ids"].append(tokenized_chat["input_ids"])
        model_inputs["attention_mask"].append(tokenized_chat["attention_mask"])
        model_inputs["labels"].append(current_labels)

    return model_inputs

# Apply the preprocessing to every split, dropping the raw text columns so
# only model-ready features (input_ids / attention_mask / labels) remain.
tokenized_datasets = raw_datasets.map(
    preprocess_function_mcq,
    batched=True,
    remove_columns=raw_datasets["train"].column_names,
)

training_args = TrainingArguments(
    output_dir="./qwen_peft_output",     # checkpoints and final model go here
    num_train_epochs=3,                  # total training epochs
    per_device_train_batch_size=4,       # per-GPU train batch (tune to memory)
    per_device_eval_batch_size=4,        # per-GPU eval batch
    gradient_accumulation_steps=2,       # effective batch = 4 * 2 per device
    eval_strategy="epoch",               # evaluate after each epoch. FIX: this was
                                         # missing, so the eval_dataset passed to
                                         # Trainer was never used (default is "no").
                                         # On transformers < 4.41 the parameter is
                                         # named evaluation_strategy instead.
    save_strategy="epoch",               # save a checkpoint after each epoch
    learning_rate=2e-5,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=100,                   # log every N optimizer steps
    fp16=True,                           # mixed precision (requires CUDA AMP support)
    # optim="adamw_torch",               # optimizer choice
    # report_to="tensorboard",           # send logs to TensorBoard
    # load_best_model_at_end=True,       # reload the best checkpoint after training
    # metric_for_best_model="eval_loss", # selection metric for the best model
)

# Batch collation: pads every feature to the longest sequence in the batch.
# Labels are padded with -100 (not pad_token_id) so padded positions are
# ignored by the loss, matching the masking done in preprocessing.
data_collator = DataCollatorForSeq2Seq(
    tokenizer=tokenizer,
    label_pad_token_id=-100,
    pad_to_multiple_of=8,     # pad widths to multiples of 8 (tensor-core friendly)
    padding="longest",        # equivalent to padding=True
)

# manual_batch_size = 4 # 例如
# if len(tokenized_datasets["train"]) >= manual_batch_size:
#     raw_batch_list = [tokenized_datasets["train"][i] for i in range(manual_batch_size)]
#
#     print("\n--- Manually inspecting DataCollator behavior ---")
#     for i, sample in enumerate(raw_batch_list):
#         print(f"Raw Sample {i} (from dataset before collator):")
#         print(f"  input_ids len: {len(sample['input_ids'])}, first 5: {sample['input_ids'][:5]}")
#         print(f"  labels len: {len(sample['labels'])}, first 5: {sample['labels'][:5]}")
#         # 确认每个样本的 labels 确实是列表
#         assert isinstance(sample['labels'], list), f"Sample {i} labels is not a list!"
#         assert all(isinstance(x, int) for x in sample['labels']), f"Sample {i} labels does not contain all ints!"
#
#
#     try:
#         print("\nCalling data_collator with the raw batch list...")
#         collated_batch = data_collator(raw_batch_list) # 这会调用 DataCollatorForSeq2Seq 的 __call__ 方法
#
#         print("\nCollated batch (output from data_collator):")
#         for key, value in collated_batch.items():
#             if hasattr(value, 'shape'):
#                 print(f"  {key} shape: {value.shape}")
#             else:
#                 print(f"  {key}: {value}") # 打印非张量类型的值，以防万一
#
#         # 关键检查：collated_batch["labels"] 是否是一个形状规整的张量
#         assert collated_batch["labels"].ndim == 2, "Collated labels should be a 2D tensor"
#         print("Labels seem to be collated correctly into a 2D tensor.")
#
#     except ValueError as e:
#         original_labels_list = [sample['labels'] for sample in raw_batch_list]
#         original_input_ids_list = [sample['input_ids'] for sample in raw_batch_list]
#
#         print("\n--- Deep dive into tokenizer.pad for labels ---")
#
#         # 1. 先确定 input_ids 填充后的目标长度
#         padded_inputs_info = tokenizer.pad(
#             {"input_ids": original_input_ids_list},  # 只填充 input_ids 以获取目标长度
#             padding="longest",  # 或者 True
#             pad_to_multiple_of=8,  # 与您的 collator 设置一致
#             return_tensors="pt"
#         )
#         target_padded_length = padded_inputs_info["input_ids"].shape[1]
#         print(f"Target padded length (from input_ids padding): {target_padded_length}")
#
#         # 2. 现在用这个目标长度来尝试填充 labels
#         print(f"Attempting to pad labels to target length: {target_padded_length}")
#         try:
#             # 模拟 DataCollatorForSeq2Seq 对 labels 的填充方式
#             # 它会把 labels 包装在 "input_ids" 键下，然后调用 pad
#             labels_features_for_padding = [{"input_ids": l} for l in original_labels_list]  # 这是一个List[Dict]
#             # 不对，应该是  {"input_ids": original_labels_list}
#             # 即一个字典，值为 List[List[int]]
#
#             padded_labels_output = tokenizer.pad(
#                 {"input_ids": original_labels_list},  # 传入 List[List[int]]
#                 padding=True,  # 强制填充
#                 max_length=target_padded_length,  # 强制填充到目标长度
#                 truncation=True,  # 如果有超过的，也进行截断
#                 pad_to_multiple_of=None,  # 这里不再需要，因为 max_length 已经是目标
#                 return_attention_mask=False,
#                 return_tensors="pt"
#             )
#             padded_labels_tensor = padded_labels_output["input_ids"]
#             print(f"Manually padded labels tensor shape: {padded_labels_tensor.shape}")
#             if padded_labels_tensor.shape[1] != target_padded_length:
#                 print(
#                     f"WARNING: Manually padded labels length ({padded_labels_tensor.shape[1]}) does not match target_padded_length ({target_padded_length})!")
#
#         except Exception as e:
#             print(f"ERROR during manual tokenizer.pad() for labels with target_length: {e}")
#             import traceback
#
#             traceback.print_exc()
#
# else:
#     print("Not enough samples in tokenized_datasets['train'] for manual batch test.")

# Assemble the Trainer and run LoRA fine-tuning.
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    tokenizer=tokenizer,
)

trainer.train()
torch.cuda.empty_cache()  # release cached GPU memory before saving

# Persist the adapter weights together with the tokenizer so the output
# directory is self-contained for later loading.
final_output_dir = "final_model_peft"
trainer.save_model(final_output_dir)
tokenizer.save_pretrained(final_output_dir)