# -*- coding: UTF-8 -*-
"""
@Date    ：2025/10/20 18:07 
@Author  ：Liu Yuezhao
@Project ：bert 
@File    ：test.py
@IDE     ：PyCharm 
"""
import os
import torch
import torch.nn as nn
from transformers import BertConfig, AutoConfig
from src.transfer.model import TimeBertWithAdapter
from adapters import AutoAdapterModel, AdapterConfig

# -----------------------------
# 1. Re-define the model components (must match the training-time definitions)
# -----------------------------

from src.tools.utils import load_config
from src.models.pre_time_bert_model import TimeBertEmbedding

# Directory containing this script — used to build paths that resolve
# correctly regardless of the current working directory.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.normpath(os.path.join(SCRIPT_DIR, "../../config.yaml"))
CHECKPOINT_PATH = os.path.normpath(os.path.join(SCRIPT_DIR, "../../checkpoints/pretrain/best_mlm_model.pth"))

yaml_config = load_config(CONFIG_PATH)
device = yaml_config["config"]["device"]  # target device string from config.yaml, e.g. "cpu"/"cuda"

# Build the project's own BERT configuration (mirrors the pre-training setup
# so the pre-trained weights can later be loaded on top of it).
# Hoist the repeated nested lookup once instead of indexing it per field.
_pm_cfg = yaml_config["pretrain_model_config"]
event_seq_bert_config = BertConfig(
    vocab_size=_pm_cfg["vocab_size"],
    hidden_size=_pm_cfg["hidden_size"],
    num_hidden_layers=_pm_cfg["num_hidden_layers"],
    num_attention_heads=_pm_cfg["num_attention_heads"],
    intermediate_size=_pm_cfg["intermediate_size"],
    max_position_embeddings=_pm_cfg["max_position_embeddings"],
    hidden_act=_pm_cfg["hidden_act"],
    hidden_dropout_prob=_pm_cfg["hidden_dropout_prob"],
    attention_probs_dropout_prob=_pm_cfg["attention_probs_dropout_prob"],
    pad_token_id=0,       # id 0 is reserved for padding
    type_vocab_size=2     # standard BERT segment (token-type) vocabulary
)

def load_time_bert_adapter_model(
    checkpoint_path: str,
    num_labels: int = 2,
    adapter_name: str = "fraud_detection",
    reduction_factor: int = 16,
    device: str = "cpu"
):
    """Rebuild the TimeBert + Adapter model and load pre-trained weights.

    Args:
        checkpoint_path: Path to a ``.pth`` checkpoint from pre-training.
            Expected to contain ``"model_state_dict"`` whose keys are
            prefixed ``"emb."`` (time embedding) and ``"bert_mlm.bert."``
            (BERT encoder).
        num_labels: Number of labels for the downstream classification head.
        adapter_name: Name under which the adapter is registered and activated.
        reduction_factor: Bottleneck reduction factor of the Pfeiffer adapter.
        device: Target device string, e.g. ``"cpu"`` or ``"cuda"``.

    Returns:
        A ``TimeBertWithAdapter`` on ``device`` with only the adapter
        (and head) parameters left trainable.
    """
    # Always deserialize onto CPU first; the assembled model is moved to
    # `device` at the end, which also works when CUDA is unavailable.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    state_dict = checkpoint["model_state_dict"]

    # --- Step 1: time-aware embedding (must match the training-time config) ---
    time_emb = TimeBertEmbedding(
        config=event_seq_bert_config,
        time2vec_dim=yaml_config["pretrain_model_config"]["hidden_size"],
        time_activation='cos'
    )
    # Strip the "emb." prefix by slicing rather than str.replace so that an
    # accidental "emb." substring later in a key can never be corrupted.
    emb_prefix = "emb."
    time_emb_state = {
        k[len(emb_prefix):]: v
        for k, v in state_dict.items()
        if k.startswith(emb_prefix)
    }
    time_emb.load_state_dict(time_emb_state, strict=True)

    # --- Step 2: build the BERT backbone and load the pre-trained weights ---
    bert_model = AutoAdapterModel.from_config(event_seq_bert_config)
    mlm_prefix = "bert_mlm.bert."
    bert_state = {
        "bert." + k[len(mlm_prefix):]: v
        for k, v in state_dict.items()
        if k.startswith(mlm_prefix)
    }
    # strict=False: the adapter model carries extra (head/adapter) parameters
    # that are absent from the MLM pre-training checkpoint.
    bert_model.load_state_dict(bert_state, strict=False)

    # --- Step 3: add and activate the adapter (crucial!) ---
    # AdapterConfig is already imported at module level; no local re-import needed.
    adapter_config = AdapterConfig.load("pfeiffer", reduction_factor=reduction_factor)
    bert_model.add_adapter(adapter_name, config=adapter_config)
    bert_model.set_active_adapters(adapter_name)  # explicitly activate for forward passes
    bert_model.train_adapter(adapter_name)        # freeze all non-adapter parameters

    # --- Step 4: assemble the full model and move it to the target device ---
    model = TimeBertWithAdapter(time_emb, bert_model, num_labels)
    model.to(device)
    return model

# -----------------------------
# 3. Usage example
# -----------------------------

if __name__ == "__main__":
    # Fail fast with a clear error if the pre-trained checkpoint is missing.
    if not os.path.exists(CHECKPOINT_PATH):
        raise FileNotFoundError(f"模型文件不存在: {CHECKPOINT_PATH}")

    model = load_time_bert_adapter_model(
        checkpoint_path=CHECKPOINT_PATH,
        num_labels=2,
        adapter_name="fraud_detection",
        reduction_factor=16,
        device=device
    )

    # Sanity-check the adapter setup and the trainable-parameter ratio.
    print("✅ Active adapters:", model.bert.active_adapters)
    print("✅ 模型加载成功！")
    print(f"模型设备: {next(model.parameters()).device}")
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"可训练参数比例: {trainable_params / total_params:.2%}")

    # Build a tiny dummy batch (batch_size=1, seq_len=4) to smoke-test forward/backward.
    input_ids = torch.tensor([[101, 107, 108, 102]])  # 101/102 presumably [CLS]/[SEP] ids — confirm against tokenizer
    attention_mask = torch.tensor([[1, 1, 1, 1]])
    interval = torch.randn(1, 4)
    same_event_interval = torch.randn(1, 4)
    age = torch.randn(1, 4)
    labels = torch.tensor([1])  # dummy label

    # Move all inputs to the same device as the model.
    input_ids = input_ids.to(device)
    attention_mask = attention_mask.to(device)
    interval = interval.to(device)
    same_event_interval = same_event_interval.to(device)
    age = age.to(device)
    labels = labels.to(device)

    # Forward + backward pass.
    model.train()  # training mode so Dropout etc. are active
    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        interval=interval,
        same_event_interval=same_event_interval,
        age=age,
        labels=labels,
        pos_weight=10.0  # NOTE(review): assumes the model's forward accepts a pos_weight kwarg — confirm in TimeBertWithAdapter
    )

    loss = outputs["loss"]
    print(f"✅ Loss: {loss.item():.4f}")

    # Clear any stale gradients before backprop (none exist on the first pass, but good practice).
    model.zero_grad()
    # Backpropagate through the adapter (and head) parameters.
    loss.backward()

    # Report which parameters are trainable and whether each received a non-zero gradient,
    # confirming that train_adapter() froze everything except the adapter.
    print("\n=== 可训练参数及梯度状态 ===")
    for name, param in model.named_parameters():
        if param.requires_grad:
            has_grad = param.grad is not None and param.grad.abs().sum() > 0
            print(f"Trainable: {name} | Has non-zero grad: {has_grad}")

    print("✅ 梯度检查完成！")