import torch
import plugin
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from transformers import BertModel


@plugin.register_plugin("model", "OCRFilterRanker")
class OCRFilterRanker(nn.Module):
    """Detect whether each OCR box should be treated as part of a POI.

    Pipeline:
        Bert: extract a text embedding per OCR (mask-aware mean pooling).
        reorder: regroup the per-OCR embeddings of each POI instance into one sequence.
        linear + act: map the concatenated embedding to a d_model-dim vector.
        transformer: relate the OCRs within one signboard and refine each OCR feature.
        linear: produce 2 logits per OCR.
    """

    def __init__(
        self,
        pretrained_path: str,
        num_layers: int = 2,
        act: str = "relu",
        d_model: int = 512,
        nhead: int = 4,
        dropout: float = 0.1,
        *args,
        **kwargs
    ):
        """Build the ranker.

        Args:
            pretrained_path: path/name passed to ``BertModel.from_pretrained``.
            num_layers: number of TransformerEncoder layers.
            act: accepted for config compatibility; currently unused (ReLU is
                hard-coded below) — NOTE(review): confirm whether it should be wired in.
            d_model: transformer model dimension.
            nhead: number of attention heads.
            dropout: dropout used inside the encoder layers.
            *args, **kwargs: swallowed for plugin-config compatibility.
        """
        super().__init__()
        self.bert = BertModel.from_pretrained(pretrained_path)
        # +5: the pooled text embedding is concatenated with 5 position/area
        # features in forward() before this projection.
        self.emb_to_dmodel = nn.Sequential(
            nn.Linear(self.bert.config.hidden_size + 5, d_model),
            nn.ReLU(),
        )
        transformer_layer = nn.TransformerEncoderLayer(
            d_model,
            nhead,
            dropout=dropout,
            activation="relu",
        )
        self.encoder = nn.TransformerEncoder(transformer_layer, num_layers)
        # Output channel 0: logit for "is a component of the POI";
        # output channel 1: ranking score.
        self.clf = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, 2),
        )

    def forward(self, x: tuple):
        """Score every OCR of every POI instance in the batch.

        Args:
            x: tuple of
                - bert_input: dict of tokenized tensors for all OCR texts
                  (must contain ``attention_mask``),
                - positions: [num_ocr, 5] position/area features,
                - id_map: iterable whose items hold (…, start, end) slice
                  bounds into the flat OCR axis, one item per POI instance.

        Returns:
            logits: [poi_batch, seq_len, 2] per-OCR logits (seq_len includes padding).
            lengths: list[int], number of valid OCRs per POI instance.
        """
        bert_input = x[0]
        positions = x[1]
        id_map = x[2]
        bert_output = self.bert(**bert_input)
        # Mask-aware mean pooling over the token dimension.
        last_hidden_state = bert_output.last_hidden_state
        attention_mask = bert_input["attention_mask"]
        s_emb = last_hidden_state * attention_mask.unsqueeze(-1)
        # clamp(min=1) guards against NaN on a degenerate all-padding row;
        # a no-op for any row with at least one real token.
        s_emb = s_emb.sum(dim=1) / attention_mask.sum(-1, keepdim=True).clamp(min=1)
        # s_emb: [num_ocr, emb_dim]

        # Concatenate the OCR text embedding with its position/area features.
        s_emb = torch.cat([s_emb, positions], dim=-1)
        # Regroup: slice out the OCR run belonging to each POI instance.
        xs = [s_emb[inst[1]:inst[2]] for inst in id_map]
        lengths = [inst[2] - inst[1] for inst in id_map]
        # Pad to a common length; nn.TransformerEncoder expects [seq_len, poi_batch, emb].
        x = pad_sequence(xs)
        x = self.emb_to_dmodel(x)   # [seq_len, poi_batch, d_model]
        padding_mask = self.generate_mask(lengths, device=s_emb.device)
        # Relate the OCRs inside one signboard; padded positions are masked out.
        x = self.encoder(x, src_key_padding_mask=padding_mask)
        logits = self.clf(x).permute(1, 0, 2)  # [poi_batch, seq_len, 2]
        return logits, lengths

    def generate_mask(self, lengths: list[int], device=None):
        """Build the src_key_padding_mask for the transformer encoder.

        Args:
            lengths: number of valid OCRs per POI instance.
            device: optional device on which to create the mask.

        Returns:
            src_key_padding_mask (torch.BoolTensor): [len(lengths), max(lengths)];
            True marks a padding position that must be masked out.
        """
        max_len = max(lengths)
        lens = torch.as_tensor(lengths, device=device)
        # Vectorized form of "positions at index >= length are padding";
        # returns bool directly, as the contract above states.
        return torch.arange(max_len, device=device).unsqueeze(0) >= lens.unsqueeze(1)

    def get_other_part_parameters(self):
        """Parameter generators of every sub-module except the pretrained BERT,
        e.g. for assigning them a separate learning rate."""
        return [
            self.emb_to_dmodel.parameters(),
            self.encoder.parameters(),
            self.clf.parameters(),
        ]

    def get_embedding(self, x):
        """Return BERT's last_hidden_state for x[0] (the tokenized OCR batch)."""
        bert_input = x[0]
        return self.bert(**bert_input).last_hidden_state

