# -*- coding: UTF-8 -*-
"""
@Date    ：2025/9/28 17:42 
@Author  ：Liu Yuezhao
@Project ：bert 
@File    ：fine_tune_model.py
@IDE     ：PyCharm 
"""
import torch
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertForSequenceClassification, BertModel
from src.tools.utils import load_config
from src.models.pre_time_bert_model import TimeBertEmbedding

# Load the project-wide YAML configuration; `device` (e.g. "cuda"/"cpu") is read
# from its "config" section and used when moving the model below.
yaml_config = load_config("./config.yaml")
device = yaml_config["config"]["device"]

# BERT configuration. Values are read from `pretrain_model_config` so the
# fine-tuning encoder matches the pre-trained model's hyperparameters exactly.
_pretrain = yaml_config["pretrain_model_config"]
event_seq_bert_config = BertConfig(
    pad_token_id=0,
    type_vocab_size=2,
    # The YAML keys share their names with BertConfig's keyword arguments,
    # so they can be forwarded directly.
    **{name: _pretrain[name] for name in (
        "vocab_size",
        "hidden_size",
        "num_hidden_layers",
        "num_attention_heads",
        "intermediate_size",
        "max_position_embeddings",
        "hidden_act",
        "hidden_dropout_prob",
        "attention_probs_dropout_prob",
    )},
)


class SeqBertForSequenceClassification(nn.Module):
    """BERT-based binary sequence classifier on top of a time-aware embedding.

    Input embeddings are produced by :class:`TimeBertEmbedding` (tokens plus
    time-interval / age signals), fed through a ``BertModel`` encoder (no
    pooler, no MLM head), and the hidden state at position 0 ([CLS]) is
    classified by a two-layer MLP head into 2 classes.
    """

    def __init__(self, config, time2vec_dim=768, time_activation='cos'):
        """
        Args:
            config: ``BertConfig`` shared with the pre-trained encoder so the
                fine-tuned weights stay compatible.
            time2vec_dim: dimensionality of the Time2Vec part of the embedding.
            time_activation: periodic activation used by Time2Vec (e.g. 'cos').
        """
        super().__init__()
        self.config = config

        # Time-aware embedding layer that replaces BertModel's own input
        # embedding path (we feed the encoder via `inputs_embeds`).
        self.emb = TimeBertEmbedding(config, time2vec_dim, time_activation)

        # Encoder backbone only — no pooling layer, no MLM head.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Two-layer classification head: hidden -> hidden -> 2 logits.
        self.classifier = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.GELU(),
            nn.Dropout(config.hidden_dropout_prob),
            nn.Linear(config.hidden_size, 2),
        )

        self._init_weights()

    def _init_weights(self):
        """Initialize the classifier head like BERT initializes Linear layers
        (normal weights with ``initializer_range`` std, zero biases)."""
        for module in self.classifier:
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, std=self.config.initializer_range)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)

    def forward(
            self,
            input_ids,
            attention_mask=None,
            token_type_ids=None,
            interval=None,
            same_event_interval=None,
            age=None,
            labels=None,
            pos_weight=10.0,
    ):
        """Run the classifier.

        Args:
            input_ids: token id tensor, presumably [B, L] — used by the
                embedding layer, not by the encoder directly.
            attention_mask: optional padding mask for the encoder.
            token_type_ids: optional segment ids (see NOTE below).
            interval, same_event_interval, age: time features consumed by
                :class:`TimeBertEmbedding`.
            labels: optional class labels [B]; when given, a cross-entropy
                loss is computed.
            pos_weight: weight applied to class 1 in the loss (``None``
                disables class weighting). Defaults to 10.0.

        Returns:
            dict with 'loss' (or None), 'logits' [B, 2], 'predictions' [B],
            and 'probabilities' [B, 2].
        """
        # Build time-aware input embeddings.
        inputs_embeds = self.emb(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            interval=interval,
            same_event_interval=same_event_interval,
            age=age,
        )

        # NOTE(review): token_type_ids is passed to the encoder as well;
        # BertModel's embedding layer adds its own token-type embeddings on
        # top of inputs_embeds, so if TimeBertEmbedding already includes them
        # they are applied twice — confirm this is intended.
        outputs = self.bert(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            return_dict=True,
        )
        sequence_output = outputs.last_hidden_state  # [B, L, H]

        # Pool by taking the first position's hidden state ([CLS]).
        pooled_output = self.dropout(sequence_output[:, 0])  # [B, H]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if pos_weight is not None:
                # Class-weighted cross entropy: up-weight the positive class
                # to counter class imbalance.
                weight = torch.tensor([1.0, pos_weight], device=labels.device, dtype=logits.dtype)
                loss = nn.CrossEntropyLoss(weight=weight)(logits, labels)
            else:
                loss = nn.CrossEntropyLoss()(logits, labels)

        return {
            'loss': loss,
            'logits': logits,
            'predictions': logits.argmax(dim=-1),
            'probabilities': logits.softmax(dim=-1),
        }

# Instantiate the fine-tuning model; time2vec_dim follows the pre-trained
# hidden size so the time embedding matches the encoder width.
fine_tune_bert_model = SeqBertForSequenceClassification(
    event_seq_bert_config,
    time2vec_dim=yaml_config["pretrain_model_config"]["hidden_size"],
    time_activation='cos',
)
# Move to the device configured in config.yaml.
fine_tune_bert_model = fine_tune_bert_model.to(device)