import logging
import mindspore as ms
import numpy as np
from mindspore import nn


from src.ms_Retsranformer import ResTranformer
from src.ms_resnet import resnet45
from src.ms_model import Model,_default_tfmer_cfg
from src.ms_BaseVision import BaseVision
from src.PositionAttention import *
from src.ms_alignment import BaseAlignment
from src.ms_bcnlanguage import BCNLanguage

class ABINetIterModel(nn.Cell):
    """ABINet iterative model: vision -> (language -> alignment) x iter_size.

    Runs the vision model once, then iteratively refines the prediction:
    the current logits are softmaxed into token distributions, passed
    through the language model, and the language features are fused with
    the (fixed) vision features by the alignment model.
    """

    def __init__(self, config):
        """Build the vision, language and alignment sub-models.

        Args:
            config: project config object; must provide
                ``dataset_max_length`` and the settings consumed by
                BaseVision / BCNLanguage / BaseAlignment. An optional
                ``iter_size`` overrides the number of refinement
                iterations (defaults to 3, the value used by the paper).
        """
        super().__init__()
        self.iter_size = getattr(config, 'iter_size', 3)
        self.max_length = config.dataset_max_length + 1  # additional stop token
        self.vision = BaseVision(config)
        self.language = BCNLanguage(config)
        self.alignment = BaseAlignment(config)
        # Build the op once here instead of re-creating it on every loop
        # iteration inside construct().
        self.softmax = nn.Softmax()

    def construct(self, images, *args):
        """Forward pass.

        Args:
            images: input image batch (shape/dtype defined by BaseVision —
                TODO confirm against the vision model).
            *args: unused; kept for interface compatibility with callers.

        Returns:
            tuple: ``(all_a_res, all_l_res, v_res)`` where ``all_a_res`` /
            ``all_l_res`` are lists (length ``iter_size``) of per-iteration
            alignment / language output dicts, and ``v_res`` is the single
            vision output dict ({'feature', 'logits', 'pt_lengths',
            'attn_scores', 'loss_weight', 'name'}).
        """
        v_res = self.vision(images)
        a_res = v_res

        all_l_res, all_a_res = [], []
        for _ in range(self.iter_size):
            # Softmax over the last axis turns logits into token distributions.
            tokens = self.softmax(a_res['logits'])
            # Clamp predicted lengths to [2, max_length].
            # TODO: move this clamping into the language model.
            lengths = ms.ops.clip_by_value(a_res['pt_lengths'], 2, self.max_length)
            # NOTE(review): lengths should drive the transformer-decoder mask,
            # but how to build that mask is unresolved, so it is currently
            # unused inside the language model.
            l_res = self.language(tokens, lengths)
            all_l_res.append(l_res)

            # Fuse language features with the original vision features; the
            # alignment output becomes the input of the next iteration.
            a_res = self.alignment(l_res['feature'], v_res['feature'])
            all_a_res.append(a_res)

        return all_a_res, all_l_res, v_res
