# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import Sequential
from mmcv.runner import BaseModule
import mmocr.utils as utils

from mmocr.models.builder import CLASSIFIERS
from .base_classifier import BaseClassifier

from mmocr.models.textrecog.layers import Classifier

@CLASSIFIERS.register_module()
class MTRClassifier(BaseClassifier):
    """Language/script classifier for multilingual text recognition.

    Pools the backbone feature map over the height axis, runs a
    bidirectional LSTM over the width (time) axis and classifies the
    output at the last timestep into one of ``num_lang`` classes.

    Args:
        num_lang (int): Number of language classes to predict.
        init_cfg (dict): Initialization config forwarded to
            :class:`BaseClassifier`.
    """

    def __init__(self,
                 num_lang=7,
                 init_cfg=dict(type='Xavier', layer='Conv2d'),
                 **kwargs):
        super().__init__(init_cfg=init_cfg)

        self.num_lang = num_lang
        # Bidirectional LSTM: 512-dim input, 256 hidden units per
        # direction -> 512-dim output per timestep.
        self.rnn = nn.LSTM(512, 256, bidirectional=True)
        # Maps the 512-dim LSTM output to num_lang logits.
        # The second LSTM out dim: std_attention -> 512, else -> 256.
        self.classifier = Sequential(Classifier(512, 256, 512, num_lang))

    def forward(self, feat):
        """Classify the language of a recognition feature map.

        Args:
            feat (Tensor): Backbone features of shape ``[N, C, H, W]``
                with ``C == 512`` (the LSTM input size).

        Returns:
            Tensor: Language logits of shape ``[N, num_lang]``.
        """
        # Collapse the height axis by averaging: [N, C, H, W] -> [N, C, W].
        # (Replaces the original mean(keepdim=True) + always-true
        # height assert + squeeze — same result in one step.)
        x = feat.mean(dim=2)
        # nn.LSTM expects sequence-first input: [W, N, C].
        x = x.permute(2, 0, 1).contiguous()
        recurrent, _ = self.rnn(x)  # [W, N, 2 * 256]
        # Output at the last timestep: [N, 512].
        # NOTE(review): for the backward direction this output has only
        # seen the final frame; if full-sequence context from both
        # directions is intended, the final hidden states would be the
        # usual choice — confirm against training results.
        x = recurrent[-1, :, :]
        outputs = self.classifier(x)  # [N, num_lang]
        return outputs