from __future__ import absolute_import

import sys

from torch import nn
from torch.nn import functional as F

from config import get_args
from . import create
# from .attention_recognition_head import AttentionRecognitionHead
from .attention_recognition_head_one_point import AttentionRecognitionHead
from .stn_head import STNHead
from .tps_spatial_transformer import TPSSpatialTransformer
from ..loss.sequenceCrossEntropyLoss import SequenceCrossEntropyLoss, CELoss

# Parse command-line flags once, at import time, so that ModelBuilder can read
# TPS/STN hyper-parameters (tps_inputsize, tps_outputsize, num_control_points,
# tps_margins, stn_activation, with_lstm, n_group, beam_width) at construction.
# NOTE(review): consuming sys.argv at import time couples this module to the
# training script's CLI and makes it awkward to import elsewhere — confirm intended.
global_args = get_args(sys.argv[1:])


class ModelBuilder(nn.Module):
    """Integrated recognition model.

    Pipeline: optional TPS rectification (STN head + thin-plate-spline
    sampler) -> encoder backbone -> attention-based recognition head,
    with a cross-entropy recognition loss.

    Args:
        arch: encoder architecture name, resolved via ``create``.
        rec_num_classes: size of the recognition vocabulary (including
            special symbols; the "PADDING" index is ``rec_num_classes - 2``).
        sDim: decoder hidden-state dimension.
        attDim: decoder attention dimension.
        max_len_labels: maximum length of a decoded label sequence.
        eos: end-of-sequence symbol index (kept for beam search, which is
            currently disabled in ``forward``).
        STN_ON: when True, rectify input images with the TPS transformer
            before encoding.
    """

    def __init__(self, arch, rec_num_classes, sDim, attDim, max_len_labels, eos, STN_ON=False):
        super(ModelBuilder, self).__init__()

        self.arch = arch
        self.rec_num_classes = rec_num_classes
        self.sDim = sDim
        self.attDim = attDim
        self.max_len_labels = max_len_labels
        self.eos = eos
        self.STN_ON = STN_ON
        # Spatial size the STN head operates on (images are downsampled to it).
        self.tps_inputsize = global_args.tps_inputsize

        # Encoder backbone; its output width determines the decoder input size.
        self.encoder = create(self.arch,
                              with_lstm=global_args.with_lstm,
                              n_group=global_args.n_group)

        self.decoder = AttentionRecognitionHead(
            num_classes=rec_num_classes,
            in_planes=self.encoder.out_planes,
            sDim=sDim,
            attDim=attDim,
            max_len_labels=max_len_labels)
        # Cross-entropy over classes; rec_num_classes - 2 is the "PADDING" index.
        self.loss = CELoss(rec_num_classes - 2)

        if self.STN_ON:
            # Thin-plate-spline sampler that warps the full-resolution image
            # according to predicted control points.
            self.tps = TPSSpatialTransformer(
                output_image_size=tuple(global_args.tps_outputsize),
                num_control_points=global_args.num_control_points,
                margins=tuple(global_args.tps_margins))
            # Localization network predicting the control points.
            self.stn_head = STNHead(
                in_planes=3,
                num_ctrlpoints=global_args.num_control_points,
                activation=global_args.stn_activation)

    def forward(self, input_dict):
        """Run rectification (if enabled), encoding and decoding.

        Args:
            input_dict: dict with keys ``'images'`` (input image batch),
                ``'rec_targets'`` (target label indices) and ``'rec_lengths'``
                (per-sample label lengths).

        Returns:
            dict with sub-dicts ``'losses'`` (``'loss_rec'``) and ``'output'``
            (``'pred_rec'``: raw decoder scores in training mode, argmax class
            indices in eval mode; plus ``'ctrl_points'`` / ``'rectified_images'``
            in eval mode when STN is on).
        """
        return_dict = {'losses': {}, 'output': {}}

        images = input_dict['images']
        targets = input_dict['rec_targets']
        lengths = input_dict['rec_lengths']

        if self.STN_ON:
            # The STN head works on a downsampled copy; the predicted control
            # points then rectify the full-resolution image.
            small = F.interpolate(images, self.tps_inputsize, mode='bilinear',
                                  align_corners=True)
            _, ctrl_points = self.stn_head(small)
            images, _ = self.tps(images, ctrl_points)
            if not self.training:
                # Keep intermediates so the caller can visualize rectification.
                return_dict['output']['ctrl_points'] = ctrl_points
                return_dict['output']['rectified_images'] = images

        feats = self.encoder(images).contiguous()

        # Decode with teacher forcing in both modes. Beam search at eval time
        # was measured to be very slow (mostly CPU-bound), so it is disabled.
        scores = self.decoder([feats, targets, lengths])
        loss_rec = self.loss(scores, targets)
        return_dict['losses']['loss_rec'] = loss_rec
        if self.training:
            return_dict['output']['pred_rec'] = scores
        else:
            return_dict['output']['pred_rec'] = scores.argmax(-1)

        return return_dict
