# -*- coding: UTF-8 -*-
"""
@Date    ：2025/9/29 11:52 
@Author  ：Liu Yuezhao
@Project ：bert 
@File    ：main_fine_tune.py
@IDE     ：PyCharm 
"""
import pandas as pd
from torch.utils.data import DataLoader
import warnings
import torch

from src.dataset.event_target_dataset import MBDDataset
from src.models.fine_tune_model import fine_tune_bert_model
from src.trainers.fine_tune_trainer import FineTuneTrainer
from src.tools.utils import load_config, read_json, split_data_to_train_valid_test, set_seed, undersample_negative_class

# Silence noisy warnings from downstream libraries during the training run.
warnings.filterwarnings("ignore")

# --- Configuration and raw data -------------------------------------------
yaml_config = load_config("./config.yaml")
trx_dict = read_json("./data/dataset/mini_trx_unique.json")

batch_size = yaml_config["data_config"]["batch_size"]
max_seq_len = yaml_config["data_config"]["max_seq_len"]
device = yaml_config["config"]["device"]
seed = yaml_config["config"]["seed"]

set_seed(seed)

# TODO(review): hard-coded absolute path — every other path in this script is
# relative/config-driven; move this into config.yaml so the script is portable.
TRX_TARGETS_PKL = "/home/datadisk/chengh/Lil/time_mbd/data/dataset/df_trx_dialogtargets_unique.pkl"
# Down-sample to 10% of rows for faster experimentation; the fixed
# random_state keeps the subsample reproducible across runs.
SAMPLE_FRAC = 0.1
df = pd.read_pickle(TRX_TARGETS_PKL).sample(frac=SAMPLE_FRAC, random_state=seed)

# --- Model: load pre-trained MLM weights into the fine-tune model ----------
model = fine_tune_bert_model.to(device)

# NOTE(review): torch.load unpickles arbitrary objects — only load
# checkpoints produced by this project / trusted sources.
pretrained_checkpoint_path = "./checkpoints/pretrain/best_mlm_model.pth"
checkpoint = torch.load(pretrained_checkpoint_path, map_location=device)
# The checkpoint may be either a raw state_dict or a dict wrapping one
# under "model_state_dict"; accept both.
state_dict = checkpoint.get("model_state_dict", checkpoint)

# Remap pre-training parameter names onto the fine-tune model:
#   bert_mlm.bert.xxx -> bert.xxx   (shared BERT encoder weights)
#   emb.xxx           -> emb.xxx    (embedding module, name unchanged)
# Anything else (e.g. the MLM head) is intentionally skipped.
# Prefix is stripped by slicing, not str.replace: replace() would rewrite
# *every* occurrence of the substring, not only the leading prefix.
_PREFIX = "bert_mlm.bert."
new_state_dict = {}
for key, value in state_dict.items():
    if key.startswith(_PREFIX):
        new_state_dict["bert." + key[len(_PREFIX):]] = value
    elif key.startswith("emb."):
        new_state_dict[key] = value
    else:
        print(f"Ignore pretrained key: {key}")

# strict=False: the classifier head is new (shows up as missing) and any
# pre-training-only modules are absent from the fine-tune model (unexpected).
missing, unexpected = model.load_state_dict(new_state_dict, strict=False)
print("Missing keys (should be classifier only):", missing)
print("Unexpected keys (should be mlm_head only):", unexpected)

# Freeze the backbone network, leaving only the upper encoder layers, the
# time-related embedding modules, and the new classifier head trainable.
# NOTE(review): matching uses substring `in`, not startswith, and the
# elif order matters — a name containing both "bert." and "emb." takes
# the first branch. Confirm parameter names can't overlap this way.
for name, param in model.named_parameters():
    # BERT encoder: freeze the lower layers, fine-tune the upper ones.
    if "bert.encoder.layer." in name:
        layer_num = int(name.split("layer.")[-1].split(".")[0])
        param.requires_grad = (layer_num >= 6)  # currently 8 layers: train layers 6-7, freeze 0-5
    elif "bert." in name:
        # Everything else under bert. (embeddings, pooler, ...) is frozen.
        param.requires_grad = False

    # emb module: fully pre-trained, but the time modules should be fine-tuned.
    elif "emb." in name:
        if "word_embeddings" in name:
            # word_embeddings are pre-trained — frozen here (could also be
            # fine-tuned; depends on downstream results).
            param.requires_grad = False
        else:
            # triple_time2vec + projection: must be fine-tuned.
            param.requires_grad = True

    # Classifier head (and any other newly added modules): must be trained.
    else:
        param.requires_grad = True

# === Step 3: print the trainable parameters (sanity check of the freezing) ===
print("\n Parameters to be fine-tuned:")
for name, param in model.named_parameters():
    if param.requires_grad:
        print(f"   {name} (shape: {param.shape})")

# --- Data loaders and training --------------------------------------------
# 70% / 20% / 10% split of the (already sub-sampled) frame.
train_data, valid_data, test_data = split_data_to_train_valid_test(
    data=df, train_valid_ratio=[0.7, 0.2]
)

# Rebalance classes by under-sampling negatives (18:1 negative:positive).
# NOTE(review): under-sampling the *validation* split skews its class
# distribution away from the real one — confirm this is intentional.
train_data_balanced = undersample_negative_class(train_data, target_col='bcard_target', ratio=18)
valid_data_balanced = undersample_negative_class(valid_data, target_col='bcard_target', ratio=18)


def _make_loader(frame, shuffle):
    """Wrap one data split in an MBDDataset and a DataLoader."""
    dataset = MBDDataset(trx_target_data=frame, trx_dict=trx_dict, max_seq_len=max_seq_len, device=device)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


# Only the training split is shuffled; the test split keeps its raw distribution.
mbd_loader_train = _make_loader(train_data_balanced, shuffle=True)
mbd_loader_valid = _make_loader(valid_data_balanced, shuffle=False)
mbd_loader_test = _make_loader(test_data, shuffle=False)

fine_tune_trainer = FineTuneTrainer(
    model=model,
    train_loader=mbd_loader_train,
    valid_loader=mbd_loader_valid,
    test_loader=mbd_loader_test,
)

fine_tune_trainer.train()