# Multimodal model: decides whether OCR boxes belong to a POI.
import torch
import plugin
import logging
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torchvision import transforms
from transformers import BertModel
from efficientnet_pytorch import EfficientNet
from model.utils import FeedForwardNetwork


@plugin.register_plugin("model", "MultiModalOneStage")
class MultiModalOneStage(nn.Module):
    """Detect whether each OCR box should be part of a POI.

    Main stages of the model:
        Bert: extract a text embedding per OCR box.
        reorder: regroup the per-box embeddings into one sequence per POI sample.
        image_dim_reducer: reduce the dimension of the image features.
        linear + act: map each concatenated embedding to a d_model-dim vector.
        transformer: relate the OCR boxes of the same signboard to each other.
        linear: classify each OCR box into logits.
    """

    def __init__(
        self,
        pretrained_path: str,
        num_layers=2,
        act="relu",
        d_model=768,
        nhead=4,
        dropout=0.1,
        image_emb_size=128,
        *args,
        **kwargs
    ):
        """Build the text/image encoders and the fusion transformer.

        Args:
            pretrained_path: path or hub name of the pretrained BERT weights.
            num_layers: number of fusion transformer encoder layers.
            act: activation name used inside the feed-forward networks.
            d_model: hidden size of the fusion transformer.
            nhead: number of attention heads of the fusion transformer.
            dropout: dropout rate shared by the FFNs and the transformer.
            image_emb_size: output size of the image feature reducer.
        """
        super(MultiModalOneStage, self).__init__()
        self.bert = BertModel.from_pretrained(
            pretrained_path)
        # Image branch: EfficientNet-b0 backbone; its feature map is
        # average-pooled in get_image_emb.
        self.backbone = EfficientNet.from_pretrained("efficientnet-b0")
        # ImageNet mean/std normalization (assumes inputs are float tensors
        # already scaled to [0, 1] -- TODO confirm against the data pipeline).
        self.tfms = transforms.Compose(
            [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),]
        )
        # Reduce the 1280-dim EfficientNet-b0 feature to image_emb_size.
        self.img_reduction = FeedForwardNetwork({
            "dims": [1280, 640, image_emb_size],
            "act": act,
            "dropout": dropout,
            "batchnorm": True
        })
        # Project [text emb | 5 position/area features | image emb] to d_model
        # (the "+5" must match the width of `positions` in forward()).
        self.emb_to_dmodel = FeedForwardNetwork({
            "dims": [self.bert.config.hidden_size+image_emb_size+5, d_model],
        })

        transformer_layer = nn.TransformerEncoderLayer(
            d_model,
            nhead,
            dropout=dropout,
            activation="relu"
        )
        self.encoder = nn.TransformerEncoder(
            transformer_layer, num_layers
        )
        # Two output channels per OCR box:
        # one is the probability of being a component of the POI,
        # the other is a ranking score.
        self.clf = FeedForwardNetwork({
            "dims": [d_model + image_emb_size, 512, 256, 2],
            "act": act,
            "dropout": dropout,
            "batchnorm": True
        })

    def get_image_emb(self, x: torch.Tensor) -> torch.Tensor:
        """Return reduced image embeddings for a batch of images.

        Args:
            x: image batch of shape [batch, channels, H, W] (4-D enforced).

        Returns:
            Tensor of shape [batch, image_emb_size].
        """
        assert x.dim() == 4
        x = self.tfms(x)
        # Global-average-pool the backbone feature map over H and W.
        feat = self.backbone.extract_features(x).mean(dim=[2, 3])
        feat = self.img_reduction(feat)
        return feat

    def forward(self, x: tuple):
        """Run the full multimodal pipeline.

        Args:
            x: tuple of (bert_input, positions, id_map, _, images, full_images).
               bert_input is a dict of BERT inputs including "attention_mask";
               positions carries 5 extra features per OCR box (see __init__);
               id_map entries hold [start, end) row ranges into the OCR batch
               at indices 1 and 2 -- meaning of index 0 not visible here.

        Returns:
            logits: tensor of shape [poi_batch, seq_len, 2].
            lengths: list with the number of OCR boxes per POI sample.
        """
        # Get each OCR box's text embedding, mean-pooled over non-pad tokens.
        bert_input, positions, id_map, _, images, full_images = x
        bert_output = self.bert(**bert_input)
        last_hidden_state = bert_output.last_hidden_state
        attention_mask = bert_input["attention_mask"]
        s_emb = last_hidden_state * attention_mask.unsqueeze(-1)
        # [batch_size, seq_len, emb_dim]
        s_emb = s_emb.sum(dim=1) / attention_mask.sum(-1, keepdim=True)
        # [batch_size, emb_dim]

        # Image embeddings: per-OCR crops and full signboard images share a
        # single backbone pass; the last `batch_size` rows are the full images.
        images_feat = self.get_image_emb(torch.cat([images, full_images], dim=0))
        batch_size = full_images.size(0)
        full_feat = images_feat[-batch_size:]
        ocr_feat = images_feat[:-batch_size]

        # Concatenate the text embedding with position/area and image features.
        s_emb = torch.cat([s_emb, positions, ocr_feat], dim=-1)
        # Regroup the per-box rows into one sequence per POI sample.
        xs = [s_emb[instance[1]: instance[2]] for instance in id_map]
        lengths = [instance[2] - instance[1] for instance in id_map]
        # Pad to a common length and build the matching padding mask.
        x = pad_sequence(xs)    # [seq_len, poi_batch, emb]
        x = self.emb_to_dmodel(x)   # [seq_len, poi_batch, dmodel]
        attention_mask = self.generate_mask(lengths)
        # Relate the OCR boxes within each signboard to each other
        # (type_as moves the CPU mask onto x's device before the bool cast).
        x = self.encoder(
            x, src_key_padding_mask=attention_mask.type_as(x).bool()
        )
        # [seq_len, poi_batch, dmodel]
        # Append the full-image embedding at every sequence position.
        full_image_emb = full_feat.unsqueeze(0)
        x = torch.cat([x, full_image_emb.repeat([x.size(0), 1, 1])], dim=-1)
        logits = self.clf(x).permute(1, 0, 2)  # [poi_batch, seq_len, 2]
        return logits, lengths

    def generate_mask(self, lengths: list[int]) -> torch.Tensor:
        """Build the padding mask for the pytorch transformer.

        Args:
            lengths: number of valid (non-pad) positions per sequence.

        Returns:
            src_key_padding_mask: float tensor of shape
                [len(lengths), max(lengths)] where 1 marks a padded position
                that should be masked (the caller casts it to bool).
        """
        max_len = max(lengths)
        mask = torch.zeros((len(lengths), max_len))
        for idx, l in enumerate(lengths):
            mask[idx, l:] = 1
        return mask

    def get_other_part_parameters(self):
        # Parameter generators of the non-BERT fusion modules, e.g. for a
        # separate optimizer group. NOTE(review): the EfficientNet backbone's
        # parameters are not included either -- confirm that is intentional.
        return [
            self.emb_to_dmodel.parameters(),
            self.encoder.parameters(),
            self.clf.parameters(),
            self.img_reduction.parameters(),
        ]

    def get_embedding(self, x):
        """Return BERT's token-level last hidden state for the OCR batch.

        NOTE(review): expects a 4-tuple input, unlike forward()'s 6-tuple;
        only the first element (the BERT input dict) is used -- confirm the
        callers' input layout.
        """
        bert_input, positions, id_map, _ = x
        bert_output = self.bert(**bert_input)
        return bert_output.last_hidden_state

