import torch
import torch.nn as nn
import torch.nn.functional as F

from .resnet import build_resnet101
from .resnext import build_resnext101

import torch    #lxd 
import pandas as pd
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import sys, math
from .Dual import *
from .Preprocessing import *
from .AnswerDecoder import *
from .init_weight import init_weight
from .GraphNN import *
from .utils import *
from .Attention import *   # lxd

from .AEDecoder import *   # zwc


def cuda_list_to_tensor(list_of_tensor):
    """
    Stack a list of equally-shaped tensors into one CUDA tensor.

    The result is detached from the autograd graph — this matches the
    original implementation, whose numpy round trip also severed gradients.

    :param list_of_tensor: list of tensors, all with identical shape
    :return: detached CUDA tensor of shape (len(list_of_tensor), *item_shape)
    :author: qms
    """
    # torch.stack replaces the previous per-item GPU->CPU->numpy->tensor->GPU
    # round trip, which was slow and allocated several intermediate copies.
    return torch.stack([item.detach() for item in list_of_tensor]).cuda()


class MSVQA(nn.Module):
    """
    Upper half of the MSVQA model.
    resnet101
    resnext101 + GCN + Decoder
    BiLSTM
    :author: qms, lxd, zwc
    """

    def __init__(self, resnext101_pretrained_path, vocab, batch_size, num_clips, num_frames_per_clip, image_height, image_width, vision_dim, module_dim, word_dim, num_of_nodes, graph_module, graph_layers,unit_layers):
        """
        Initialization.
        :param resnext101_pretrained_path: path to the pretrained resnext101 weights
        :param vocab: vocabulary dict with 'question_token_to_idx' and 'answer_token_to_idx'
        :param batch_size: batch size
        :param num_clips: number of clips per video
        :param num_frames_per_clip: number of frames in each clip
        :param image_height: image height
        :param image_width: image width
        :param vision_dim: video feature dimension; initializes the input dims of the two feature branches that follow the video encoders
        :param module_dim: output dim of the BLSTM and the visual feature extractors; also the in/out dims of the multi-head GCN and the input dim of the answer generator
        :param word_dim: word-embedding dimension; used by the textual branch (BLSTM) and the multi-head GCN
        :param num_of_nodes: used to build the n*n adjacency matrix
        :param graph_module: GAT, a model derived from GCN
        :param graph_layers: number of appearance/motion sub-layers inside each unit layer of the GCN
        :param unit_layers: how many times the GCN processes the intermediate graph
        :author: qms, lxd, zwc
        """
        super().__init__()
        
        self.resnet101_model = build_resnet101()
        self.resnext101_model = build_resnext101(resnext101_pretrained_path)
        # Everything below handles feature processing plus the BLSTM, up to producing one fused feature.
        self.feature_aggregation = ContextSelfAttn(module_dim)  # strengthen the feature with self-attention

        encoder_vocab_size = len(vocab['question_token_to_idx'])  # question vocab size; the required dims for questions/answers come from here
        # This dim initializes the input dim of InputUnitLinguisticDynamic.
        self.num_classes = len(vocab['answer_token_to_idx'])  # answer vocab size; initializes the output dim of SimpleOutputUnitOpenEnded at the final answer stage
        self.linguistic_input_unit = InputUnitLinguisticDynamic(vocab_size=encoder_vocab_size, wordvec_dim=word_dim,
                                                                rnn_dim=module_dim, textual_encoder='LSTM')
        # Textual feature encoder; the next two lines build the two visual feature encoders.
        self.visual_appearance_input_unit = VisualAppearanceEncoder(appearance_dim=vision_dim, module_dim=module_dim,
                                                                    bidirectional=True)
        self.visual_motion_input_unit = nn.Linear(vision_dim, module_dim)
        if image_height == 224 and image_width == 224:
            # resnext101 produces a 32768-dim motion feature for 224x224 inputs (see forward()).
            self.visual_motion_input_unit = nn.Linear(32768, module_dim)
        # lxd's part
        self.visual_input_unit = DualVGRUnit_multiple(word_dim=word_dim, module_dim=module_dim,
                                                      num_of_nodes=num_of_nodes, appearance_graph_layers=graph_layers,
                                                      motion_graph_layers=graph_layers, graph_module=graph_module,
                                                      unit_layers=unit_layers)

        self.output_unit = SimpleOutputUnitOpenEnded(module_dim=module_dim, num_answers=self.num_classes)
        # The fusion layer for the two features lives in AnswerDecoder.
        init_modules(self.modules(), w_init="xavier_uniform")  # initialize weights (helper defined in utils)
        nn.init.uniform_(self.linguistic_input_unit.encoder_embed.weight, -1.0, 1.0)  # embedding weight init

        # zwc's part
        self.AE = AEDecoder(batch_size, num_clips, num_frames_per_clip, image_height, image_width)
        self.AE.cuda()
        self.AE.apply(weights_init)
        # apply() recursively visits every sub-module and applies the given function to each one.

    def forward(self, appearance_clips, motion_clips, question, question_len, stage=1):
        """
        Forward pass.
        :param appearance_clips: appearance frames, Tensor[batch_size, num_clips, num_frames_per_clip, 3, image_height, image_width]
        :param motion_clips: motion frames, Tensor[batch_size, num_clips, 3, num_frames_per_clip, image_height, image_width]
        :param question: question tokens, Tensor[batch_size, seq_len]
        :param question_len: question lengths, Tensor[batch_size]
        :param stage: 1 -> return autoencoder reconstructions; otherwise -> return answer logits and intermediate embeddings
        """
        # appearance_clips is [batch_size, num_clips, num_frames_per_clip, 3, image_height, image_width].
        # In the original version, num_clips tensors of [num_frames_per_clip, 3, image_height, image_width] were fed
        # to resnet101, giving num_clips outputs of [num_frames_per_clip, 2048].
        # Here every frame of every clip of every batch item goes through resnet101,
        # producing an output of [batch_size, num_clips, num_frames, visual_inp_dim].
        appearance_feature = []
        for batch in appearance_clips:
            batch_feature = []
            for clip in batch:
                feats = self.resnet101_model(clip)
                feats = feats.squeeze()
                batch_feature.append(feats)
            batch_feature = cuda_list_to_tensor(batch_feature)
            appearance_feature.append(batch_feature)
        appearance_feature = cuda_list_to_tensor(appearance_feature)

        # motion_clips is Tensor[batch_size, num_clips, 3, num_frames_per_clip, image_height, image_width].
        # In the original version, clips was Tensor[24, 3, 16, 224, 224], i.e.
        # [num_clips, 3, num_frames_per_clip, image_height, image_width] fed to resnext101, giving a [24, 32768] output.
        # Here each batch item's [num_clips, 3, num_frames_per_clip, image_height, image_width] goes through resnext101.
        motion_feature = []
        for batch in motion_clips:
            feats = self.resnext101_model(batch)
            feats = feats.squeeze()
            motion_feature.append(feats)
        motion_feature = cuda_list_to_tensor(motion_feature)
        """
        video_appearance_feat: [Tensor] (batch_size, num_clips, num_frames, visual_inp_dim)
        video_motion_feat: [Tensor] (batch_size, num_clips, visual_inp_dim)
        """
        
        # lxd's part starts here.
        batch_size = question.size(0)
        # get image, word, and sentence embeddings
        question_embedding, word_embedding, dynamic_question_embedding = self.linguistic_input_unit(question, question_len)
        # question_embedding: (batch_size, module_dim)
        # word_embedding: (batch_size, seq_len, module_dim) @ word features
        # dynamic_question_embedding: (batch_size, seq_len, module_dim)
        video_appearance_feat = self.visual_appearance_input_unit( appearance_feature)  # appearance feature-LSTM: [Tensor] batch_size * num_of_clips * module_dim
        video_motion_feat = self.visual_motion_input_unit(motion_feature)  # motion feature-Linear: [Tensor] batch_size * num_of_clips * module_dim
        # Reasoning module
        ## 1. layer-1
        visual_embedding, aq_embed, mq_embed, com_app, com_motion, aq_fusion, mq_fusion, appearance_video_feat, motion_video_feat = self.visual_input_unit(
            video_appearance_feat, video_motion_feat, dynamic_question_embedding, word_embedding, question_len)
        
        """
        encoded appearance feature: [Tensor] (batch_size, num_of_clips, module_dim)
        encoded motion feature: [Tensor] (batch_size, num_of_clips, module_dim)
        """

        if stage == 1:
            rebuild_appearance, rebuild_motion = self.AE(appearance_video_feat, motion_video_feat)
            return rebuild_appearance, rebuild_motion
        else:
            # output module
            visual_embedding = self.feature_aggregation(visual_embedding)
            out = self.output_unit(question_embedding, visual_embedding)  # the fused answer
            return out, aq_embed, mq_embed, com_app, com_motion, aq_fusion, mq_fusion
        