# coding=utf-8

import torch.nn as nn
import torch
from modules.transformation import TPS_SpatialTransformerNetwork
from modules.feature_extraction import VGG_FeatureExtractor, RCNN_FeatureExtractor, ResNet_FeatureExtractor, \
    MobileNetV3_FeatureExtractor, DenseNet, CNN_lite
from modules.resnet_aster import ResNet_ASTER
from modules.Base_ResNet import Base_ResNet_FeatureExtractor
from modules.sequence_modeling import BidirectionalLSTM, BidirectionalGRU
from modules.prediction import Attention
from modules.network_torch import CRNN
from modules.Res50Master import ConvEmbeddingGC
from modules.resnet_FPN import ResNet_FPN
from modules.SRN_modules import Transforme_Encoder, SRN_Decoder, Torch_transformer_encoder

from config import ConfigOpt


class Model(nn.Module):
    """Four-stage text-recognition network.

    Pipeline: Transformation -> FeatureExtraction -> SequenceModeling -> Prediction.
    Each stage is chosen by name from the ``opt`` config object; an unknown
    FeatureExtraction / Prediction name raises in ``__init__``, while an
    unknown Transformation / SequenceModeling name simply disables that stage.
    """

    def __init__(self, opt):
        super(Model, self).__init__()
        self.opt = opt
        # Stage names are re-read in forward() to route the data flow.
        self.stages = {'Trans': opt.Transformation, 'Feat': opt.FeatureExtraction,
                       'Seq': opt.SequenceModeling, 'Pred': opt.Prediction}

        """ Transformation """
        if opt.Transformation == 'TPS':
            self.Transformation = TPS_SpatialTransformerNetwork(
                F=opt.num_fiducial, I_size=(opt.imgH, opt.imgW), I_r_size=(opt.imgH, opt.imgW),
                I_channel_num=opt.input_channel)
        else:
            # No self.Transformation attribute is created in this case, so
            # forward() must gate on 'TPS' (see BUG FIX note there).
            print('No Transformation module specified')

        """ FeatureExtraction """
        if opt.FeatureExtraction == 'VGG':
            self.FeatureExtraction = VGG_FeatureExtractor(opt.input_channel, opt.output_channel)
        elif opt.FeatureExtraction == 'RCNN':
            self.FeatureExtraction = RCNN_FeatureExtractor(opt.input_channel, opt.output_channel)
        elif opt.FeatureExtraction == 'ResNet':
            self.FeatureExtraction = ResNet_FeatureExtractor(opt.input_channel, opt.output_channel)
        elif opt.FeatureExtraction == 'ResNet_50':
            self.FeatureExtraction = ResNet_ASTER(opt.input_channel, opt.output_channel)
        elif opt.FeatureExtraction == 'MobileNetV3':
            self.FeatureExtraction = MobileNetV3_FeatureExtractor(opt.input_channel, opt.output_channel)
        elif opt.FeatureExtraction == 'DenseNet':
            self.FeatureExtraction = DenseNet(opt.input_channel)  # output channel is 768
        elif opt.FeatureExtraction == 'CNN_Lite':
            self.FeatureExtraction = CNN_lite(opt.input_channel)  # output channel is 512
        elif opt.FeatureExtraction == 'ResNet_FPN':
            self.FeatureExtraction = ResNet_FPN()  # output channel is 512
        elif opt.FeatureExtraction == 'Base_ResNet':
            self.FeatureExtraction = Base_ResNet_FeatureExtractor(opt.input_channel, opt.output_channel)
        elif opt.FeatureExtraction == 'ConvEmbeddingGC':
            self.FeatureExtraction = ConvEmbeddingGC(opt.input_channel, opt.output_channel)
        else:
            raise Exception('No FeatureExtraction module specified')
        # NOTE(review): some extractors above have a fixed output width
        # (DenseNet=768, CNN_lite/ResNet_FPN=512 per the inline comments) that
        # ignores opt.output_channel — confirm opt.output_channel matches the
        # chosen extractor before relying on this value.
        self.FeatureExtraction_output = opt.output_channel

        """ Sequence modeling """
        if opt.SequenceModeling == 'BiLSTM':
            self.SequenceModeling = nn.Sequential(
                BidirectionalLSTM(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
                BidirectionalLSTM(opt.hidden_size, opt.hidden_size, opt.hidden_size))
            self.SequenceModeling_output = opt.hidden_size
        elif opt.SequenceModeling == 'GRU':
            self.SequenceModeling = nn.Sequential(
                BidirectionalGRU(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
                BidirectionalGRU(opt.hidden_size, opt.hidden_size, opt.hidden_size))
            self.SequenceModeling_output = opt.hidden_size
        elif opt.SequenceModeling == 'SRN':
            self.SequenceModeling = Transforme_Encoder(n_layers=2, n_position=opt.position_dim)
            # self.SequenceModeling = Torch_transformer_encoder(n_layers=2, n_position=opt.position_dim)
            self.SequenceModeling_output = 512  # fixed width of the transformer encoder
        else:
            print('No SequenceModeling module specified')
            self.SequenceModeling_output = self.FeatureExtraction_output

        """ Prediction """
        if opt.Prediction == 'CTC':
            self.Prediction = nn.Linear(self.SequenceModeling_output, opt.num_class)
        elif opt.Prediction == 'Attn':
            self.Prediction = Attention(self.SequenceModeling_output, opt.hidden_size, opt.num_class)
        elif opt.Prediction == 'SRN':
            # +1 presumably reserves a slot for an end-of-sequence position — TODO confirm.
            self.Prediction = SRN_Decoder(n_position=opt.position_dim, N_max_character=opt.batch_max_character + 1,
                                          n_class=opt.alphabet_size)
        else:
            # BUG FIX: the old message ("neither CTC or Attn") omitted the
            # supported SRN head and was ungrammatical.
            raise Exception('Prediction must be one of CTC, Attn or SRN')

    '''Freeze Layers'''
    def freeze_backbone(self):
        """Disable gradients for the feature-extraction and sequence-modeling stages."""
        for param in self.FeatureExtraction.parameters():
            param.requires_grad = False

        # Guard: __init__ creates no SequenceModeling module when
        # opt.SequenceModeling named none of the supported options.
        if hasattr(self, 'SequenceModeling'):
            for param in self.SequenceModeling.parameters():
                param.requires_grad = False

    def unfreeze_backbone(self):
        """Re-enable gradients for the stages disabled by freeze_backbone()."""
        for param in self.FeatureExtraction.parameters():
            param.requires_grad = True

        if hasattr(self, 'SequenceModeling'):
            for param in self.SequenceModeling.parameters():
                param.requires_grad = True

    def forward(self, input, is_train=True):
        """Run the full recognition pipeline on an image batch.

        Args:
            input: image tensor; presumably [b, c, imgH, imgW] — TODO confirm
                against the configured feature extractor.
            is_train: kept for interface compatibility; unused by the heads
                wired up below (the Attn call that consumed it is disabled).

        Returns:
            Output of the prediction head (layout depends on the head).
        """
        """ Transformation stage """
        # BUG FIX: only the 'TPS' branch creates self.Transformation in
        # __init__, so gate on 'TPS'. The original test (`!= "None"`) raised
        # AttributeError for every other non-"None" setting.
        if self.stages['Trans'] == 'TPS':
            input = self.Transformation(input)

        """ Feature extraction stage """
        visual_feature = self.FeatureExtraction(input)
        # [b, c, h, w] -> [b, w, c, h], then drop the height axis to get a
        # [b, w, c] sequence. NOTE(review): squeeze(3) is a no-op when h != 1 —
        # confirm every configured extractor reduces height to 1.
        visual_feature = visual_feature.permute(0, 3, 1, 2)
        visual_feature = visual_feature.squeeze(3)

        """ Sequence modeling stage """
        if self.stages['Seq'] in ('BiLSTM', 'GRU'):
            # BUG FIX: the original dispatched only on 'BiLSTM', so a model
            # configured with 'GRU' built the module in __init__ but never
            # ran it here (and the head sizing then mismatched at runtime).
            contextual_feature = self.SequenceModeling(visual_feature)

        elif self.stages['Seq'] == 'SRN':
            # Transforme_Encoder returns a tuple; element 0 is the feature map.
            contextual_feature = self.SequenceModeling(visual_feature, src_mask=None)[0]

        else:
            contextual_feature = visual_feature  # for convenience. this is NOT contextually modeled by BiLSTM

        """ Prediction stage """
        if self.stages['Pred'] == 'CTC':
            prediction = self.Prediction(contextual_feature.contiguous())

        elif self.stages['Pred'] == 'SRN':
            prediction = self.Prediction(contextual_feature)

        else:
            # Attn head (the text/is_train-consuming variant is disabled).
            # prediction = self.Prediction(contextual_feature.contiguous(), text, is_train,
            #                              batch_max_length=self.opt.batch_max_length)
            prediction = self.Prediction(contextual_feature.contiguous())
        return prediction


# class Model_CRNN(nn.Module):

#     def __init__(self, opt):
#         super(Model_CRNN, self).__init__()
#         self.opt = opt
#         self.stages = {'Trans': opt.Transformation, 'Feat': opt.FeatureExtraction,
#                        'Seq': opt.SequenceModeling, 'Pred': opt.Prediction}

#         """ Transformation """
#         if opt.Transformation == 'TPS':
#             self.Transformation = TPS_SpatialTransformerNetwork(
#                 F=opt.num_fiducial, I_size=(opt.imgH, opt.imgW), I_r_size=(opt.imgH, opt.imgW),
#                 I_channel_num=opt.input_channel)
#         else:
#             print('No Transformation module specified')

#         """ FeatureExtraction """
#         if opt.FeatureExtraction == 'VGG':
#             self.FeatureExtraction = VGG_FeatureExtractor(opt.input_channel, opt.output_channel)
#         elif opt.FeatureExtraction == 'RCNN':
#             self.FeatureExtraction = RCNN_FeatureExtractor(opt.input_channel, opt.output_channel)
#         elif opt.FeatureExtraction == 'ResNet':
#             self.FeatureExtraction = ResNet_FeatureExtractor(opt.input_channel, opt.output_channel)
#         else:
#             raise Exception('No FeatureExtraction module specified')

#         self.FeatureExtraction_output = opt.output_channel  # int(imgH/16-1) * 512
#         # self.FeatureExtraction_output = opt.output_channel * 2  # cat ResNet
#         # self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # Transform final (imgH/16-1) -> 1: compress height to 1. Only FPN needs this height compression; the other extractors do not.

#         """ Sequence modeling"""
#         if opt.SequenceModeling == 'BiLSTM':
#             self.SequenceModeling = nn.Sequential(
#                 BidirectionalLSTM(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
#                 BidirectionalLSTM(opt.hidden_size, opt.hidden_size, opt.hidden_size))
#             self.SequenceModeling_output = opt.hidden_size
#         else:
#             print('No SequenceModeling module specified')
#             self.SequenceModeling_output = self.FeatureExtraction_output

#         """ Prediction """
#         if opt.Prediction == 'CTC':
#             self.Prediction = nn.Linear(self.SequenceModeling_output, opt.num_class)
#         elif opt.Prediction == 'Attn':
#             self.Prediction = Attention(self.SequenceModeling_output, opt.hidden_size, opt.num_class)
#         else:
#             raise Exception('Prediction is neither CTC or Attn')

#     def forward(self, input, text, is_train=True):
#         """ Transformation stage """
#         if not self.stages['Trans'] == "None":
#             input = self.Transformation(input)

#         """ Feature extraction stage """
#         visual_feature = self.FeatureExtraction(input)
#         visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2))  # [b, c, h, w] -> [b, w, c, h]
#         visual_feature = visual_feature.squeeze(3)

#         """ Sequence modeling stage """
#         if self.stages['Seq'] == 'BiLSTM':
#             contextual_feature = self.SequenceModeling(visual_feature)
#         else:
#             contextual_feature = visual_feature  # for convenience. this is NOT contextually modeled by BiLSTM

#         """ Prediction stage """
#         if self.stages['Pred'] == 'CTC':
#             prediction = self.Prediction(contextual_feature.contiguous())
#         else:
#             prediction = self.Prediction(contextual_feature.contiguous(), text, is_train,
#                                          batch_max_length=self.opt.batch_max_length)

#         return prediction


def numel(model):
    """Count the total number of elements across all parameters of *model*."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total


if __name__ == "__main__":
    # Smoke test: build a ConvEmbeddingGC + BiLSTM + CTC model and push one
    # dummy grayscale image through it.
    opt = ConfigOpt()
    opt.imgW = 640
    opt.FeatureExtraction = 'ConvEmbeddingGC'
    opt.SequenceModeling = 'BiLSTM'
    opt.Prediction = 'CTC'
    opt.batch_max_length = 36
    opt.output_channel = 256
    opt.hidden_size = 256
    opt.SRN_PAD = len(opt.character) - 1  # presumably the SRN pad-token index — TODO confirm
    # opt.position_dim = 26
    opt.rgb = None
    opt.alphabet_size = len(opt.character)
    x = torch.randn(1, 1, 32, 640)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel,
          opt.output_channel, opt.hidden_size, opt.num_class, opt.batch_max_length,
          opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling, opt.Prediction)
    model = Model(opt)
    print(model)

    num_params = numel(model)
    # Estimated float32 footprint in MB (4 bytes per parameter).
    # BUG FIX: '{:4f}' (field width 4, default 6 decimals) was a typo for
    # '{:.4f}', and the value printed is a size in MB, not a parameter count.
    print('Model size: {:.4f}MB'.format(num_params * 4 / 1024 / 1024))
    # BUG FIX: don't shadow `model` with its own forward output.
    output = model(x)
    print(output)