import logging
import mindspore as ms
from mindspore import nn

from PositionAttention import *
from ms_Retsranformer import ResTranformer
from ms_resnet import resnet45
from ms_model import Model,_default_tfmer_cfg
from ms_BaseVision import BaseVision
from ms_alignment import BaseAlignment
from ms_bcnlanguage import BCNLanguage

class ABINetIterModel(nn.Cell):
    """ABINet iterative correction model.

    Runs the vision model once, then iteratively refines the prediction by
    feeding the current (aligned) logits through the language model and
    fusing language and vision features in the alignment module.
    """

    def __init__(self, config):
        """Build the vision, language and alignment sub-modules.

        Args:
            config: project config object; must provide ``dataset_max_length``
                and whatever the three sub-modules read from it.
        """
        super().__init__()
        # TODO(review): iteration count is hard-coded; original intent was
        # `ifnone(config.model_iter_size, 1)` — confirm before making configurable.
        self.iter_size = 3
        self.max_length = config.dataset_max_length + 1  # additional stop token
        self.vision = BaseVision(config)
        self.language = BCNLanguage(config)
        self.alignment = BaseAlignment(config)
        # Cells must be created in __init__, not inside construct(); the
        # default axis of nn.Softmax is -1, which is what we want here.
        self.softmax = nn.Softmax()

    def construct(self, images, *args):
        """Run one vision pass plus `iter_size` language/alignment refinements.

        Args:
            images: input image batch tensor.
            *args: unused; kept for interface compatibility.

        Returns:
            Training: (all alignment results, all language results, vision result).
            Inference: (last alignment result, last language result, vision result).
        """
        v_res = self.vision(images)
        a_res = v_res
        all_l_res, all_a_res = [], []
        for _ in range(self.iter_size):
            tokens = self.softmax(a_res['logits'])
            lengths = a_res['pt_lengths']
            # Clamp to [2, max_length]; TODO: move this into the language model.
            lengths = ms.ops.clip_by_value(lengths, 2, self.max_length)
            # NOTE(review): `lengths` should also be passed to the language
            # model, but the transformer-decoder mask handling is not
            # implemented yet, so it is currently unused.
            l_res = self.language(tokens)
            all_l_res.append(l_res)
            a_res = self.alignment(l_res['feature'], v_res['feature'])
            all_a_res.append(a_res)
        if self.training:
            return all_a_res, all_l_res, v_res
        else:
            return a_res, all_l_res[-1], v_res
