# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import Sequential
from mmcv.runner import BaseModule
import mmocr.utils as utils

from mmocr.models.builder import CLASSIFIERS
from .base_classifier import BaseClassifier


@CLASSIFIERS.register_module()
class LanguageClassifier(BaseClassifier):
    """Linear language-identification head.

    Pools the incoming feature tensor down to one vector per sample and
    projects it to ``num_lang`` logits with a single linear layer.

    Args:
        num_lang (int): Number of language classes. Defaults to 7.
        in_channels (int): Channel dimension of the pooled input
            features. Defaults to 512.
        init_cfg (dict): Initialization config passed to
            :class:`BaseModule`. Defaults to Xavier init for Conv2d.
    """

    def __init__(self,
                 num_lang=7,
                 in_channels=512,
                 init_cfg=dict(type='Xavier', layer='Conv2d'),
                 **kwargs):
        super().__init__(init_cfg=init_cfg)

        self.classifier = nn.Linear(in_channels, num_lang)

    def forward(self, feat):
        """Compute per-sample language logits.

        Args:
            feat (Tensor): Either a 4-D feature map ``(N, C, H, W)``,
                averaged over the spatial dims, or a lower-rank tensor
                averaged over ``dim=1``.
                NOTE(review): assumes the non-4D case is ``(N, T, C)``
                so the pooled size matches ``in_channels`` — confirm
                against callers.

        Returns:
            Tensor: Logits of shape ``(N, num_lang)``.
        """
        if feat.dim() == 4:
            # Global average pool over the spatial dimensions.
            feat = feat.mean(dim=(2, 3))
        else:
            # Average over the sequence (time) dimension.
            feat = feat.mean(dim=1)
        return self.classifier(feat)  # [N, num_lang]