from __future__ import absolute_import

import sys

import torch
from thop import profile
from torch import nn
from torch.nn import functional as F

from config import get_args
from . import create
from .attention_recognition_head_one_point_step import Step_Generation_Attention
from .stn_head import STNHead
from .tps_spatial_transformer import TPSSpatialTransformer
from ..loss.sequenceCrossEntropyLoss import SequenceCrossEntropyLoss, LabelSmoothCrossEntropyLoss
from ..loss.distanceLoss import DistanceLossV1

# Parse command-line options once at import time; all ModelBuilder instances
# share these settings (tps input/output sizes, control points, stn activation,
# n_group).
# NOTE(review): parsing sys.argv at module import makes this file hard to use
# from contexts with unrelated argv (tests, notebooks) — consider lazy injection.
global_args = get_args(sys.argv[1:])


class ModelBuilder(nn.Module):
    """
    This is the integrated model: optional TPS rectification (STN head +
    thin-plate-spline transformer), a CNN encoder, and a step-wise attention
    decoder, together with the recognition loss.
    """

    def __init__(self, arch, rec_num_classes, sDim, attDim, max_len_labels, eos, STN_ON=False, label_smooth=True, show_atten=False, global_info=False, print_model_size=False):
        """
        Args:
            arch: encoder architecture name passed to ``create``
                (e.g. "ResNet_ASTER", "ResNet_ASTER_2D_4x25",
                "ResNet_ASTER_2D_4x16", "ResNet_FPN").
            rec_num_classes: size of the recognition vocabulary
                (decoder output classes).
            sDim: stored only; not referenced by the visible code.
            attDim: stored only; not referenced by the visible code.
            max_len_labels: maximum decoding length used at inference time.
            eos: end-of-sequence symbol index (stored only in visible code).
            STN_ON: if True, build the STN head and TPS rectifier and
                rectify inputs in ``forward``.
            label_smooth: if truthy, use label-smoothed cross entropy
                (smoothing 0.1); otherwise plain sequence cross entropy.
            show_atten: flag stored for attention visualization.
            global_info: if True, the encoder is expected to also return a
                global embedding (see ``forward``).
            print_model_size: if True, profile encoder/decoder FLOPs/params
                once (via thop) and then disable itself.
        """
        super(ModelBuilder, self).__init__()
        self.show_atten = show_atten
        self.global_info = global_info
        self.arch = arch
        self.rec_num_classes = rec_num_classes
        self.sDim = sDim
        self.attDim = attDim
        self.max_len_labels = max_len_labels
        self.eos = eos
        self.STN_ON = STN_ON
        self.tps_inputsize = global_args.tps_inputsize

        if self.STN_ON:
            # Localization network predicting TPS control points.
            self.stn_head = STNHead(
                in_planes=3,
                num_ctrlpoints=global_args.num_control_points,
                activation=global_args.stn_activation)
            # Thin-plate-spline sampler producing the rectified image.
            self.tps = TPSSpatialTransformer(
                output_image_size=tuple(global_args.tps_outputsize),
                num_control_points=global_args.num_control_points,
                margins=tuple(global_args.tps_margins))

        self.encoder = create(self.arch,  # arch: ["ResNet_ASTER", "ResNet_ASTER_2D_4x25", "ResNet_ASTER_2D_4x16", "ResNet_FPN"]
                              with_lstm=False,
                              n_group=global_args.n_group)

        self.decoder = Step_Generation_Attention(n_dim=512, num_class=rec_num_classes)

        # Truthiness test instead of `== True` (idiomatic, same behavior for
        # the documented bool argument).
        if label_smooth:
            self.rec_crit = LabelSmoothCrossEntropyLoss(0.1)
        else:
            self.rec_crit = SequenceCrossEntropyLoss()
        self.dist_crit = DistanceLossV1()
        self.print_model_size = print_model_size

    def forward(self, input_dict):
        """
        Run rectification (optional), encoding, and decoding.

        Args:
            input_dict: dict with keys 'images' (image batch tensor),
                'rec_targets' (padded label indices) and 'rec_lengths'
                (per-sample label lengths).
                # assumes images are [b, 3, H, W] — TODO confirm against caller

        Returns:
            dict with 'losses' (each value unsqueezed to 1-dim for gathering)
            and 'output' ('pred_rec', 'step_points'; at eval time also
            'feature' and, when STN_ON, 'ctrl_points'/'rectified_images').
        """
        return_dict = {}
        return_dict['losses'] = {}
        return_dict['output'] = {}

        # x:[b,3,64,256], rec_targets:[b,100], rec_lengths:[4,]
        x, rec_targets, rec_lengths = input_dict['images'], input_dict['rec_targets'], input_dict['rec_lengths']

        # rectification
        if self.STN_ON:
            # input images are downsampled before being fed into stn_head.
            stn_input = F.interpolate(x, self.tps_inputsize, mode='bilinear', align_corners=True)
            # The STN feature map is unused here; only the control points
            # drive the TPS resampling.
            _, ctrl_points = self.stn_head(stn_input)
            x, _ = self.tps(x, ctrl_points)  # x:[b,3,32,100]
            if not self.training:
                # save for visualization
                return_dict['output']['ctrl_points'] = ctrl_points
                return_dict['output']['rectified_images'] = x

        if self.global_info:
            encoder_feats, global_embedding = self.encoder(x)  # 1,b,c
        else:
            encoder_feats = self.encoder(x)
            global_embedding = None
            if self.print_model_size:
                print("****** print encoder size ******")
                flops, params = profile(self.encoder, inputs=(x, ))
                flops = flops / encoder_feats.size(0)  # per-sample FLOPs
                print('flops:{}'.format(flops))
                print('params:{}'.format(params))
                print("****** print encoder size ******")
        encoder_feats = encoder_feats.contiguous()

        if self.training:
            # Trim padding down to the longest label in this batch before
            # teacher-forced decoding.
            max_len = max(rec_lengths)
            rec_targets = rec_targets[:, :max_len]
            rec_pred, step_points = self.decoder(encoder_feats, rec_targets, rec_lengths)
            loss_rec = self.rec_crit(rec_pred, rec_targets, rec_lengths)
            # loss_dist = self.dist_crit(step_points, rec_lengths, encoder_feats.size())
            rec_pred = rec_pred.argmax(-1)
            return_dict['losses']['loss_rec'] = loss_rec
            # return_dict['losses']['loss_dist'] = loss_dist
            return_dict['output']['pred_rec'] = rec_pred
            return_dict["output"]["step_points"] = step_points
            if self.print_model_size:
                print("****** print decoder size ******")
                flops, params = profile(self.decoder, inputs=(encoder_feats, rec_targets, rec_lengths, ))
                flops = flops / encoder_feats.size(0)
                print('flops:{}'.format(flops))
                print('params:{}'.format(params))
                print("****** print decoder size ******")
                # Profile only once.
                self.print_model_size = False
        else:
            # Free-running decoding up to max_len_labels steps.
            rec_pred, step_points = self.decoder(encoder_feats, init_max_len=self.max_len_labels)
            # rec_targets = rec_targets[:, :max_len]  # b,max_len
            # loss_rec = self.rec_crit(rec_pred[:, :max_len, :], rec_targets, rec_lengths)
            rec_pred = rec_pred.argmax(-1)
            # Bug fix: the zero placeholder loss was `.cuda()`-hard-coded,
            # which crashed CPU-only evaluation; allocate it on the same
            # device as the features instead.
            return_dict['losses']['loss_rec'] = torch.tensor(0.0, device=encoder_feats.device)
            return_dict['output']['pred_rec'] = rec_pred
            return_dict["output"]["step_points"] = step_points
            return_dict["output"]["feature"] = encoder_feats
            if self.print_model_size:
                print("****** print decoder size ******")
                flops, params = profile(self.decoder, inputs=(encoder_feats, None, None, self.max_len_labels, ))
                flops = flops / encoder_feats.size(0)
                print('flops:{}'.format(flops))
                print('params:{}'.format(params))
                print("****** print decoder size ******")
                # Profile only once.
                self.print_model_size = False

        # pytorch0.4 bug on gathering scalar(0-dim) tensors
        for k, v in return_dict['losses'].items():
            return_dict['losses'][k] = v.unsqueeze(0)

        return return_dict
