
import torch
import torch.nn as nn
from transformers import AutoModel, AutoConfig
from typing import Dict, Optional, Tuple


class BaseNERModel(nn.Module):
    """
    Base named-entity-recognition model (entity extraction only).

    Standard sequence-labeling architecture: a pretrained transformer
    encoder followed by a token-level linear classification head.
    """

    def __init__(self,
                 pretrained_model_name: str,
                 num_entity_labels: int,
                 dropout_prob: float = 0.1):
        """
        Args:
            pretrained_model_name: HuggingFace model name or local path.
            num_entity_labels: size of the entity tag set.
            dropout_prob: dropout applied before the classifier head.
        """
        super().__init__()
        self.config = AutoConfig.from_pretrained(pretrained_model_name)
        # Kept so the loss path doesn't have to dig into the Sequential
        # for `out_features` (brittle against head refactors).
        self.num_entity_labels = num_entity_labels

        # Pretrained encoder
        self.encoder = AutoModel.from_pretrained(pretrained_model_name)

        # Entity-recognition head
        self.entity_classifier = nn.Sequential(
            nn.Dropout(dropout_prob),
            nn.Linear(self.config.hidden_size, num_entity_labels)
        )

        # ignore_index=-100 matches the HuggingFace convention for
        # padded / special-token positions.
        self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100)

    def forward(
            self,
            input_ids: torch.Tensor,
            attention_mask: torch.Tensor,
            labels: Optional[torch.Tensor] = None
    ) -> Dict[str, torch.Tensor]:
        """
        Forward pass.

        Args:
            input_ids: [batch_size, seq_len]
            attention_mask: [batch_size, seq_len]
            labels: [batch_size, seq_len] (optional)

        Returns:
            Dict with:
                "logits": [batch_size, seq_len, num_labels]
                "loss": scalar (only when `labels` is provided)
        """
        # NOTE: `output_hidden_states=True` was removed — only the last
        # hidden state is used, so materializing every layer wasted memory.
        encoder_outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask
        )

        sequence_output = encoder_outputs.last_hidden_state  # [batch_size, seq_len, hidden_size]
        logits = self.entity_classifier(sequence_output)

        result = {"logits": logits}
        if labels is not None:
            # Flatten to [batch*seq, num_labels] vs [batch*seq] for CE;
            # -100 positions are ignored by the loss.
            loss = self.loss_fn(
                logits.view(-1, self.num_entity_labels),
                labels.view(-1)
            )
            result["loss"] = loss

        return result


class BaseRelationModel(nn.Module):
    """
    Base relation-extraction model (relation classification only).

    Classifies the relation between entity pairs from the concatenated
    head/tail entity representations via a two-layer MLP.
    """

    def __init__(self,
                 hidden_size: int,
                 num_relation_labels: int,
                 dropout_prob: float = 0.1):
        """
        Args:
            hidden_size: per-entity representation width; the classifier
                input is twice this (head ++ tail).
            num_relation_labels: size of the relation label set.
            dropout_prob: dropout used at both MLP stages.
        """
        super().__init__()

        # Two-layer MLP over the concatenated (head, tail) representation.
        mlp_layers = [
            nn.Dropout(dropout_prob),
            nn.Linear(2 * hidden_size, hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout_prob),
            nn.Linear(hidden_size, num_relation_labels),
        ]
        self.classifier = nn.Sequential(*mlp_layers)

        # Plain cross-entropy over relation labels.
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(
            self,
            entity_pairs: torch.Tensor,
            relation_labels: Optional[torch.Tensor] = None
    ) -> Dict[str, torch.Tensor]:
        """
        Forward pass.

        Args:
            entity_pairs: [num_pairs, 2 * hidden_size]
            relation_labels: [num_pairs] (optional)

        Returns:
            Dict with:
                "logits": [num_pairs, num_relations]
                "loss": scalar (only when `relation_labels` is provided)
        """
        logits = self.classifier(entity_pairs)

        result = {"logits": logits}
        if relation_labels is None:
            return result

        result["loss"] = self.loss_fn(logits, relation_labels)
        return result


class BaseJointModel(nn.Module):
    """
    Shared components for joint entity/relation extraction (no
    task-specific business logic): a pretrained encoder plus generic
    helpers for building entity representations and entity-pair candidates.
    """

    def __init__(self, pretrained_model_name: str):
        """
        Args:
            pretrained_model_name: HuggingFace model name or local path.
        """
        super().__init__()
        self.config = AutoConfig.from_pretrained(pretrained_model_name)
        self.encoder = AutoModel.from_pretrained(pretrained_model_name)

    def _extract_entity_representations(
            self,
            sequence_output: torch.Tensor,
            entity_positions: torch.Tensor
    ) -> torch.Tensor:
        """
        Extract one vector per entity from the token-level sequence output.

        Args:
            sequence_output: [batch_size, seq_len, hidden_size]
            entity_positions: [batch_size, num_entities, 2], each row is
                [start_pos, end_pos]. Must be an integer (int64) tensor of
                valid indices into seq_len — `torch.gather` requires it.

        Returns:
            [batch_size, num_entities, hidden_size] — the mean of the
            start-token and end-token representations of each entity.
        """
        hidden_size = sequence_output.size(-1)

        # Start/end token indices, each [batch_size, num_entities].
        start_pos = entity_positions[..., 0]
        end_pos = entity_positions[..., 1]

        # Gather the hidden state at each start/end position.
        start_repr = torch.gather(
            sequence_output,
            dim=1,
            index=start_pos.unsqueeze(-1).expand(-1, -1, hidden_size)
        )
        end_repr = torch.gather(
            sequence_output,
            dim=1,
            index=end_pos.unsqueeze(-1).expand(-1, -1, hidden_size)
        )

        # Combine by simple averaging (concatenation is an alternative).
        return (start_repr + end_repr) / 2

    def _create_entity_pairs(
            self,
            entity_repr: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Build every ordered entity pair, excluding self-pairs.

        Args:
            entity_repr: [batch_size, num_entities, hidden_size]

        Returns:
            Tuple of:
                entity_pairs: [batch_size, num_pairs, 2 * hidden_size],
                    head and tail representations concatenated, where
                    num_pairs = num_entities * (num_entities - 1).
                pair_indices: [num_pairs, 2] — (head, tail) index pairs.
                    NOTE: shared across the whole batch, NOT per-example;
                    the previous docstring incorrectly claimed a
                    [batch_size, num_pairs, 2] shape.
        """
        num_entities = entity_repr.size(1)
        device = entity_repr.device

        # Cartesian product of entity indices; the head index varies
        # fastest (ordering preserved from the original implementation).
        idx = torch.arange(num_entities, device=device)
        head_idx = idx.view(1, -1).repeat(num_entities, 1).view(-1)
        tail_idx = idx.view(-1, 1).repeat(1, num_entities).view(-1)

        # Drop self-pairs (an entity cannot relate to itself).
        keep = head_idx != tail_idx
        head_idx = head_idx[keep]
        tail_idx = tail_idx[keep]

        # Fancy-index once per role: [batch_size, num_pairs, hidden_size].
        head_repr = entity_repr[:, head_idx]
        tail_repr = entity_repr[:, tail_idx]

        # Concatenate head ++ tail into the pair representation.
        entity_pairs = torch.cat([head_repr, tail_repr], dim=-1)
        pair_indices = torch.stack([head_idx, tail_idx], dim=-1)

        return entity_pairs, pair_indices