import torch
import torch.nn as nn
import torch.nn.functional as F

###########################
# Autoencoder
###########################
# You can modify the model to use convolutional layers instead of linear layers
class AEDecoder(nn.Module):
    """Fully-connected decoder that reconstructs appearance and motion video
    clips from a shared latent representation ``z``.

    The two streams share a bottleneck trunk (layer2-layer4) and then diverge
    into stream-specific expansion heads that progressively unfold the feature
    vector into full video tensors:

    - appearance: ``(batch, num_clips, num_frames_per_clip, 3, H, W)``
    - motion:     ``(batch, num_clips, 3, num_frames_per_clip, H, W)``
    """

    def __init__(self, batch_size, num_clips, num_frames_per_clip, image_height, image_width, z_size=768, inner_dim=512, dropout_rate=0.5):
        """
        Initialize the decoder.

        :param batch_size: nominal batch size (kept for backward compatibility;
            ``forward`` now infers the batch dimension from its input, so
            partial final batches also work)
        :param num_clips: number of clips per video
        :param num_frames_per_clip: number of frames in each clip
        :param image_height: output frame height
        :param image_width: output frame width
        :param z_size: dimensionality of the shared latent variable z
        :param inner_dim: hidden width of the expansion layers
        :param dropout_rate: dropout probability (commonly 0.5); applied only
            while the module is in training mode
        :author: chaoshangcs(github), zwc, qms
        """
        super(AEDecoder, self).__init__()
        # Shared bottleneck-expansion trunk applied to both streams.
        self.layer2 = nn.Linear(z_size, 256)
        self.layer3 = nn.Linear(256, 512)
        self.layer4 = nn.Linear(512, 256)

        # Appearance head: unfolds frames, then channels, then height, then width.
        self.layer5_1 = nn.Linear(256, inner_dim)
        self.layer6_1 = nn.Linear(inner_dim, inner_dim * num_frames_per_clip)
        self.layer7_1 = nn.Linear(inner_dim, inner_dim * 3)
        self.layer8_1 = nn.Linear(inner_dim, inner_dim * image_height)
        self.layer9_1 = nn.Linear(inner_dim, image_width)

        # Motion head: unfolds channels, then frames, then height, then width.
        self.layer5_2 = nn.Linear(256, inner_dim)
        self.layer6_2 = nn.Linear(inner_dim, inner_dim * 3)
        self.layer7_2 = nn.Linear(inner_dim, inner_dim * num_frames_per_clip)
        self.layer8_2 = nn.Linear(inner_dim, inner_dim * image_height)
        self.layer9_2 = nn.Linear(inner_dim, image_width)

        self.drop = dropout_rate
        self.batch_size = batch_size
        self.num_clips = num_clips
        self.num_frames_per_clip = num_frames_per_clip
        self.image_height = image_height
        self.image_width = image_width
        self.inner_dim = inner_dim


    def forward(self, appearance_video_feat, motion_video_feat):
        """
        Reconstruct video clips from clip-level feature vectors.

        :param appearance_video_feat: appearance clip features,
            shape ``(batch, num_clips, z_size)``
        :param motion_video_feat: motion clip features,
            shape ``(batch, num_clips, z_size)``
        :return appearance: reconstructed appearance clips,
            shape ``(batch, num_clips, num_frames_per_clip, 3, H, W)``
        :return motion: reconstructed motion clips,
            shape ``(batch, num_clips, 3, num_frames_per_clip, H, W)``
        :author: chaoshangcs(github), zwc, qms
        """
        # Infer the batch dimension from the input rather than trusting the
        # constructor's batch_size, so a smaller final batch does not break view().
        batch_size = appearance_video_feat.size(0)

        # Shared trunk. training=self.training makes dropout a no-op in eval
        # mode (F.dropout defaults to training=True, which would corrupt
        # inference-time reconstructions).
        appearance = F.dropout(F.relu(self.layer2(appearance_video_feat)), self.drop, training=self.training)
        appearance = F.dropout(F.relu(self.layer3(appearance)), self.drop, training=self.training)
        appearance = F.dropout(F.relu(self.layer4(appearance)), self.drop, training=self.training)

        motion = F.dropout(F.relu(self.layer2(motion_video_feat)), self.drop, training=self.training)
        motion = F.dropout(F.relu(self.layer3(motion)), self.drop, training=self.training)
        motion = F.dropout(F.relu(self.layer4(motion)), self.drop, training=self.training)

        # Appearance head: each Linear multiplies the last dim, each view
        # splits the product back into an explicit axis.
        appearance = F.relu(self.layer5_1(appearance))  # (B, num_clips, inner_dim)
        appearance = F.relu(self.layer6_1(appearance))  # (B, num_clips, inner_dim * num_frames_per_clip)
        appearance = appearance.view(batch_size, self.num_clips, self.num_frames_per_clip, self.inner_dim)
        appearance = F.relu(self.layer7_1(appearance))  # (B, num_clips, num_frames_per_clip, inner_dim * 3)
        appearance = appearance.view(batch_size, self.num_clips, self.num_frames_per_clip, 3, self.inner_dim)
        appearance = F.relu(self.layer8_1(appearance))  # (B, num_clips, num_frames_per_clip, 3, inner_dim * image_height)
        appearance = appearance.view(batch_size, self.num_clips, self.num_frames_per_clip, 3, self.image_height, self.inner_dim)
        appearance = self.layer9_1(appearance)  # (B, num_clips, num_frames_per_clip, 3, image_height, image_width)

        # Motion head: same scheme, but channels are unfolded before frames.
        motion = F.relu(self.layer5_2(motion))  # (B, num_clips, inner_dim)
        motion = F.relu(self.layer6_2(motion))  # (B, num_clips, inner_dim * 3)
        motion = motion.view(batch_size, self.num_clips, 3, self.inner_dim)
        motion = F.relu(self.layer7_2(motion))  # (B, num_clips, 3, inner_dim * num_frames_per_clip)
        motion = motion.view(batch_size, self.num_clips, 3, self.num_frames_per_clip, self.inner_dim)
        motion = F.relu(self.layer8_2(motion))  # (B, num_clips, 3, num_frames_per_clip, inner_dim * image_height)
        motion = motion.view(batch_size, self.num_clips, 3, self.num_frames_per_clip, self.image_height, self.inner_dim)
        motion = self.layer9_2(motion)  # (B, num_clips, 3, num_frames_per_clip, image_height, image_width)

        return appearance, motion

    # NOTE(review): the original upstream code also had these helpers, kept
    # here for reference (translated from the original Chinese note):
    #
    #     def set_input(self, z):
    #         input_Z = z  # receive z and assign it to the input
    #         # resize self.input_Z to z's size and copy z's contents into it
    #         self.input_Z.resize_(z.size()).copy_(z)
    #
    #     def backward_AE_pretrain(self):
    #         # Autoencoder loss
    #         AErealA, AErealB = self.AE.forward(self.real_A, self.real_B)
    #         self.loss_AE_pre = self.criterionAE(AErealA, self.real_A) + self.criterionAE(AErealB, self.real_A)
    #         self.loss_AE_pre.backward()
    #
    # where self.criterionAE = torch.nn.MSELoss()


###############################################################################
# Functions
###############################################################################


def weights_init(m):
    """
    Initialize a module's weights DCGAN-style; intended for ``model.apply(weights_init)``.

    Conv layers get weights ~ N(0, 0.02); BatchNorm/InstanceNorm layers get
    weights ~ N(1, 0.02) and zero bias. All other module types are left untouched.

    :param m: module to initialize
    :return: None
    :author: chaoshangcs(github)
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1 or classname.find('InstanceNorm') != -1:
        # nn.InstanceNorm*d defaults to affine=False, in which case weight and
        # bias are None — guard to avoid an AttributeError on .data.
        if m.weight is not None:
            m.weight.data.normal_(1.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)