import torch
import torchvision
from torch import nn
from torch.nn import functional as F
import const


class VGG16Extractor(nn.Module):
    """Expose three intermediate feature maps of a pretrained VGG16.

    Runs the input through torchvision's VGG16 `features` stack and
    collects the activations listed in `self.select`. Shapes in the
    comments assume a 224x224 input — TODO confirm against the caller.
    """

    def __init__(self):
        super(VGG16Extractor, self).__init__()
        # Map the string index of a layer inside vgg16.features to a
        # human-readable feature name used by downstream heads.
        self.select = {
            '22': 'conv_4_3',  # [batch_size, 512, 28, 28]
            '23': 'pooled_4',  # [batch_size, 512, 14, 14]
            '30': 'pooled_5',  # [batch_size, 512, 7, 7]
        }
        self.vgg = torchvision.models.vgg16(pretrained=True).features

    def forward(self, x):
        """Return a dict of the selected intermediate feature maps.

        Args:
            x: image batch tensor (presumably [batch, 3, 224, 224]).

        Returns:
            dict mapping names from `self.select` ('conv_4_3',
            'pooled_4', 'pooled_5') to their feature tensors.
        """
        ret = {}
        # named_children() is the public equivalent of the private
        # `_modules.items()` the original used; iteration order is the
        # registration order of the Sequential, so behavior is unchanged.
        for name, layer in self.vgg.named_children():
            x = layer(x)
            if name in self.select:
                ret[self.select[name]] = x
        return ret


class VGG16BaselineNet(nn.Module):
    """Baseline multi-task network on top of VGG16 'pooled_5' features.

    From the flattened [batch, 512*7*7] feature it predicts:
      - clothing category (48-way classification),
      - 1000 binary attributes,
      - visibility of 8 landmarks,
      - (x, y) positions of 8 landmarks.
    """

    def __init__(self):
        super(VGG16BaselineNet, self).__init__()
        self.vgg16_extractor = VGG16Extractor()
        # Each task head is a 2-layer MLP: 512*7*7 -> 1024 -> task_dim.
        self.category_fc1 = nn.Linear(512 * 7 * 7, 1024)
        self.category_fc2 = nn.Linear(1024, 48)        # 48 category logits
        self.attr_fc1 = nn.Linear(512 * 7 * 7, 1024)
        self.attr_fc2 = nn.Linear(1024, 1000 * 2)      # 1000 attrs x 2 classes
        self.lm_vis_fc1 = nn.Linear(512 * 7 * 7, 1024)
        self.lm_vis_fc2 = nn.Linear(1024, 8 * 2)       # 8 landmarks x 2 classes
        self.lm_pos_fc1 = nn.Linear(512 * 7 * 7, 1024)
        self.lm_pos_fc2 = nn.Linear(1024, 8 * 2)       # 8 landmarks x (x, y)
        # Step counter, used for evaluation/bookkeeping.
        self.step = 0

    def _head(self, feat, fc1, fc2):
        """Apply one task head: fc1 -> ReLU -> fc2."""
        return fc2(F.relu(fc1(feat)))

    def forward(self, sample):
        """Run all four task heads on the VGG16 'pooled_5' feature.

        Args:
            sample: dict with key 'image' holding the input image batch.

        Returns:
            dict with:
                'category_output': [batch, 48] category logits
                'attr_output':     [batch, 2, 1000] attribute logits
                'lm_vis_output':   [batch, 2, 8] landmark-visibility logits
                'lm_pos_output':   [batch, 8, 2] landmark positions
        """
        vgg16_output = self.vgg16_extractor(sample['image'])

        pooled_5 = vgg16_output['pooled_5']
        batch_size = pooled_5.size(0)
        feat = pooled_5.reshape(batch_size, -1)  # [batch, 512*7*7]

        category_output = self._head(feat, self.category_fc1, self.category_fc2)

        # BUGFIX: the original reshaped with `size()[1] / 2`, which is float
        # division in Python 3 and makes reshape() raise a TypeError; let
        # reshape infer the last dimension with -1 instead.
        attr_output = self._head(feat, self.attr_fc1, self.attr_fc2)
        attr_output = attr_output.reshape(batch_size, 2, -1)  # [batch, 2, 1000]

        lm_vis_output = self._head(feat, self.lm_vis_fc1, self.lm_vis_fc2)
        lm_vis_output = lm_vis_output.reshape(batch_size, 2, -1)  # [batch, 2, 8]

        lm_pos_output = self._head(feat, self.lm_pos_fc1, self.lm_pos_fc2)
        lm_pos_output = lm_pos_output.reshape(batch_size, -1, 2)  # [batch, 8, 2]
        if const.ACT_FUNC_IN_POSE == 'sigmoid':
            # torch.sigmoid: F.sigmoid is deprecated and emits a warning.
            lm_pos_output = torch.sigmoid(lm_pos_output)

        return {
            'category_output': category_output,
            'attr_output': attr_output,
            'lm_vis_output': lm_vis_output,
            'lm_pos_output': lm_pos_output,
        }

if __name__ == '__main__':
    pass
    # Loss:
    # net = VGG16BaselineNet()

    # category_output, attr_output, lm_vis_output, lm_pos_output = \
    #     net(image, category_type, category_label, landmark_vis, landmark_pos, attr)

    # category_loss_func = torch.nn.CrossEntropyLoss()
    # category_loss = category_loss_func(category_output, category_label)

    # # All attributes are binary (2 classes) and there are 1000 of them;
    # # cross-entropy is computed for each one separately, the class weights
    # # are applied, and the results are averaged — which matches what we want.
    # attr_loss_func = torch.nn.CrossEntropyLoss(weight=torch.tensor([const.WEIGHT_ATTR_NEG, const.WEIGHT_ATTR_POS]))
    # attr_loss = attr_loss_func(attr_output, attr)

    # lm_vis_loss_func = torch.nn.CrossEntropyLoss(weight=torch.tensor([const.WEIGHT_LANDMARK_VIS_NEG, const.WEIGHT_LANDMARK_VIS_POS]))
    # lm_vis_loss = lm_vis_loss_func(lm_vis_output, landmark_vis)

    # landmark_vis_float = torch.unsqueeze(landmark_vis.float(), dim=2)
    # landmark_vis_float = torch.cat([landmark_vis_float, landmark_vis_float], dim=2)  # use ground-truth visibility as a mask: only count loss where vis == 1

    # lm_pos_loss_func = torch.nn.MSELoss()
    # lm_pos_loss = lm_pos_loss_func(landmark_vis_float * lm_pos_output, landmark_vis_float * landmark_pos)
