# models/joint_model.py
import torch
import torch.nn as nn
from typing import Dict, Optional, Tuple
from .base_model import BaseJointModel, BaseNERModel, BaseRelationModel


class MedicalJointExtractionModel(BaseJointModel):
    """
    Joint medical entity/relation extraction model.

    Inherits from ``BaseJointModel`` and combines a token-level entity
    recognition head (BIO tagging) with a relation head that classifies
    ordered entity pairs from concatenated span representations.
    """

    def __init__(self,
                 pretrained_model_name: str,
                 num_entity_labels: int,
                 num_relation_labels: int,
                 dropout_prob: float = 0.1):
        """
        Args:
            pretrained_model_name: name of the pretrained encoder
                (e.g. ``bert-base-chinese``).
            num_entity_labels: number of entity tags (BIO scheme).
            num_relation_labels: number of relation types.
            dropout_prob: dropout probability used in both heads.
        """
        super().__init__(pretrained_model_name)

        # Entity recognition head: per-token classification over BIO tags.
        self.entity_head = nn.Sequential(
            nn.Dropout(dropout_prob),
            nn.Linear(self.config.hidden_size, num_entity_labels)
        )

        # Relation head: consumes concatenated [head; tail] entity
        # representations, hence 2 * hidden_size input features.
        self.relation_head = nn.Sequential(
            nn.Dropout(dropout_prob),
            nn.Linear(2 * self.config.hidden_size, self.config.hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout_prob),
            nn.Linear(self.config.hidden_size, num_relation_labels)
        )

        # ignore_index=-100 skips special/padding tokens in the NER loss.
        self.entity_loss_fn = nn.CrossEntropyLoss(ignore_index=-100)
        self.relation_loss_fn = nn.CrossEntropyLoss()

        # Initialize the newly added (non-pretrained) layers.
        self.init_weights()

    def init_weights(self):
        """Initialize the weights of the newly added head layers.

        BUGFIX: the original version iterated over the two ``nn.Sequential``
        containers themselves and tested ``isinstance(module, nn.Linear)``,
        which was never true, so no layer was ever initialized. We now
        recurse into the containers to reach the ``nn.Linear`` layers.
        """
        for head in (self.entity_head, self.relation_head):
            for module in head.modules():
                if isinstance(module, nn.Linear):
                    nn.init.xavier_uniform_(module.weight)
                    if module.bias is not None:
                        nn.init.zeros_(module.bias)

    def forward(
            self,
            input_ids: torch.Tensor,
            attention_mask: torch.Tensor,
            entity_labels: Optional[torch.Tensor] = None,
            relation_matrix: Optional[torch.Tensor] = None,
            entity_positions: Optional[torch.Tensor] = None
    ) -> Dict[str, torch.Tensor]:
        """
        Full forward pass: encode, tag entities, classify entity pairs.

        Args:
            input_ids: [batch_size, seq_len]
            attention_mask: [batch_size, seq_len]
            entity_labels: [batch_size, seq_len], provided during training.
            relation_matrix: [batch_size, num_entities, num_entities],
                provided during training.
            entity_positions: [batch_size, num_entities, 2], optional;
                used at prediction time to supply known spans.

        Returns:
            Dict with:
                "entity_logits": [batch_size, seq_len, num_entity_labels]
                "relation_logits": [total_pairs, num_relation_labels]
                    (ordered entity pairs, self-pairs removed, flattened
                    across the batch)
                "loss": weighted combined loss when labels are provided,
                    else None.
        """
        # Encoder forward pass.
        outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True
        )
        sequence_output = outputs.last_hidden_state

        # Token-level entity recognition.
        entity_logits = self.entity_head(sequence_output)

        batch_size = input_ids.size(0)

        if entity_positions is None:
            if entity_labels is not None:
                # Training: derive entity spans from gold BIO labels.
                entity_positions = self._get_entity_positions(entity_labels)
            else:
                # BUGFIX: pure inference (no labels, no positions) used to
                # crash with AttributeError on `entity_positions.shape`.
                # Degrade to an empty span set -> empty relation output.
                entity_positions = torch.zeros(
                    (batch_size, 0, 2),
                    dtype=torch.long,
                    device=input_ids.device
                )

        # Relation extraction over entity pairs.
        relation_outputs = self._extract_relations(
            sequence_output,
            entity_positions,
            relation_matrix
        )

        # Combined loss (only when both label sets are available).
        loss = None
        if entity_labels is not None and relation_matrix is not None:
            entity_loss = self.entity_loss_fn(
                entity_logits.view(-1, self.entity_head[-1].out_features),
                entity_labels.view(-1)
            )

            # Relation loss may be absent if no valid entity pair exists.
            relation_loss = self.relation_loss_fn(
                relation_outputs["relation_logits"].view(-1, self.relation_head[-1].out_features),
                relation_outputs["relation_labels"].view(-1)
            ) if relation_outputs["relation_labels"] is not None else 0

            # Fixed 0.5 weight on the relation term; NER dominates.
            loss = entity_loss + 0.5 * relation_loss

        return {
            "entity_logits": entity_logits,
            "relation_logits": relation_outputs["relation_logits"],
            "loss": loss
        }

    def _get_entity_positions(self, entity_labels: torch.Tensor) -> torch.Tensor:
        """
        Extract entity spans from BIO label ids.

        Args:
            entity_labels: [batch_size, seq_len]

        Returns:
            [batch_size, max_entities, 2] long tensor of (start, end)
            half-open spans, zero-padded per sample.
        """
        batch_positions = []

        for batch_idx in range(entity_labels.size(0)):
            labels = entity_labels[batch_idx]
            entity_spans = []
            current_entity = None

            for pos in range(labels.size(0)):
                label_id = labels[pos].item()
                if label_id == -100:  # special tokens are ignored
                    continue

                # NOTE(review): assumes the label map encodes O=0, B-tags as
                # odd ids and I-tags as even ids — confirm against the
                # project's label vocabulary.
                if label_id % 2 == 1:  # B- tag: start a new span
                    if current_entity is not None:
                        entity_spans.append(current_entity)
                    current_entity = [pos, pos + 1]
                elif label_id != 0 and current_entity is not None:  # I- tag
                    current_entity[1] = pos + 1

            if current_entity is not None:
                entity_spans.append(current_entity)

            if entity_spans:
                batch_positions.append(
                    torch.tensor(entity_spans, device=entity_labels.device)
                )
            else:
                # Explicit long dtype for consistency with the non-empty case.
                batch_positions.append(
                    torch.zeros((0, 2), dtype=torch.long, device=entity_labels.device)
                )

        # Zero-pad every sample to the batch-wide maximum entity count.
        max_entities = max((len(x) for x in batch_positions), default=0)
        padded_positions = torch.zeros(
            (len(batch_positions), max_entities, 2),
            device=entity_labels.device,
            dtype=torch.long
        )

        for i, positions in enumerate(batch_positions):
            if len(positions) > 0:
                padded_positions[i, :len(positions)] = positions

        return padded_positions

    def _extract_relations(
            self,
            sequence_output: torch.Tensor,
            entity_positions: torch.Tensor,
            relation_labels: Optional[torch.Tensor] = None
    ) -> Dict[str, Optional[torch.Tensor]]:
        """
        Core relation extraction over ordered entity pairs.

        Args:
            sequence_output: [batch_size, seq_len, hidden_size]
            entity_positions: [batch_size, num_entities, 2]
            relation_labels: [batch_size, num_entities, num_entities], optional.

        Returns:
            Dict with:
                "relation_logits": [total_pairs, num_relation_labels]
                    (self-pairs removed, flattened across the batch)
                "relation_labels": [total_pairs] when labels were given,
                    else None.
        """
        batch_size, num_entities, _ = entity_positions.shape
        device = sequence_output.device

        # Span representations for every (possibly padded) entity slot.
        # `_extract_entity_representations` is defined on the base class.
        entity_repr = self._extract_entity_representations(sequence_output, entity_positions)

        relation_logits = []
        flat_relation_labels = []

        for i in range(batch_size):
            # BUGFIX: the original applied `.any(dim=-1)` to the already-1D
            # tensor `entity_positions[i, :, 0]`, collapsing the mask to a
            # scalar. A padding slot is [0, 0]; a real span always has
            # end = pos + 1 >= 1, so keep rows with any non-zero coordinate.
            valid_mask = (entity_positions[i] != 0).any(dim=-1)
            curr_entities = entity_repr[i, valid_mask]
            num_valid = curr_entities.size(0)

            if num_valid == 0:
                continue

            # All ordered (head, tail) combinations via broadcasting.
            head_repr = curr_entities.unsqueeze(1).expand(-1, num_valid, -1)
            tail_repr = curr_entities.unsqueeze(0).expand(num_valid, -1, -1)
            pairs = torch.cat([head_repr, tail_repr], dim=-1)

            # [num_valid, num_valid, num_relation_labels]
            curr_logits = self.relation_head(pairs)

            if relation_labels is not None:
                curr_labels = relation_labels[i, valid_mask][:, valid_mask]
                mask = torch.eye(num_valid, dtype=torch.bool, device=device)
                curr_labels = curr_labels[~mask]  # drop self-relations
                flat_relation_labels.append(curr_labels.flatten())

            # Drop self-pairs and flatten to [num_valid*(num_valid-1), R].
            curr_logits = curr_logits[~torch.eye(num_valid, dtype=torch.bool, device=device)]
            relation_logits.append(curr_logits)

        # Merge per-sample results across the batch.
        if relation_logits:
            relation_logits = torch.cat(relation_logits)
            relation_labels = torch.cat(flat_relation_labels) if flat_relation_labels else None
        else:
            relation_logits = torch.zeros((0, self.relation_head[-1].out_features), device=device)
            relation_labels = None

        return {
            "relation_logits": relation_logits,
            "relation_labels": relation_labels
        }