import torch
from src.base_networks import *
from torch import nn
from src import const
import os
import torchvision
from torch.nn import functional as F

class ExpNetwork(ModuleWithAttr):
    '''
    Experiment that combines image and text embeddings directly.

    Original author's note (translated): results were poor — possibly due
    to missing normalization — and this approach is suggested to be
    abandoned.
    '''

    def __init__(self, pretrained_embeddings=None):
        """Build the embedding sub-networks and the loss modules.

        Args:
            pretrained_embeddings: optional pre-trained word vectors,
                forwarded to ``AverageTextEmbedding``.
        """
        super(ExpNetwork, self).__init__()
        self.text_embedding = AverageTextEmbedding(pretrained_embeddings)
        self.image_embedding = ImageEmbedding()
        self.image_text_loss = InnerProductContrastiveLoss()
        self.outfit_loss = OutfitSimpleCenterLoss()
        self.inter_center_product_loss = InterCenterProductLoss()

    def forward(self, sample):
        """Embed both modalities and pass the validity masks through.

        Args:
            sample: dict consumed by the embedding sub-networks; must also
                contain 'word_mask' and 'image_mask'.

        Returns:
            dict with keys 'text_embedding', 'image_embedding',
            'text_mask', 'image_mask'.
        """
        text_embedding = self.text_embedding(sample)
        image_embedding = self.image_embedding(sample)
        text_mask = sample['word_mask']
        image_mask = sample['image_mask']
        return {
            'text_embedding': text_embedding,
            'image_embedding': image_embedding,
            'text_mask': text_mask,
            'image_mask': image_mask,
        }

    def cal_loss(self, sample, output):
        """Combine the individual loss terms into one weighted total.

        Args:
            sample: the input batch (masks are re-read from it).
            output: dict produced by ``forward``.

        Returns:
            dict with 'structure' (list of [name, weight, value] triples),
            'all' (the weighted sum), and 'outfit_comp_metrics'.
        """
        text_embedding = output['text_embedding']
        image_embedding = output['image_embedding']
        text_mask = output['text_mask']
        image_mask = output['image_mask']

        # L2-normalize along the feature dim; the epsilon guards all-zero
        # rows. (Original comment, translated: normalization is done here;
        # other schemes could be considered.)
        text_embedding = text_embedding / (torch.norm(text_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)

        # Flatten (batch, item, dim) -> (batch*item, dim) for the pairwise loss.
        line_text_embedding = text_embedding.reshape(-1, text_embedding.shape[2])
        line_image_embedding = image_embedding.reshape(-1, image_embedding.shape[2])
        line_text_mask = sample['word_mask'].reshape(-1)  # whether each text embedding is valid
        line_image_mask = sample['image_mask'].reshape(-1)  # whether each image embedding is valid

        # L2 penalty on the embeddings, averaged over valid entries.
        # NOTE(review): these are computed AFTER normalization, so every
        # valid row has (near-)unit norm and both terms are ~constant;
        # consider computing them on the raw embeddings — TODO confirm intent.
        norm_text_loss = ((text_embedding * text_mask.unsqueeze(2).float()) ** 2).sum() / text_mask.sum().float()
        norm_image_loss = ((image_embedding * image_mask.unsqueeze(2).float()) ** 2).sum() / image_mask.sum().float()

        # Keep the image embedding consistent with the text embedding.
        image_text_loss = self.image_text_loss(line_text_embedding, line_image_embedding, line_text_mask, line_image_mask)
        outfit_center_loss_dict = self.outfit_loss(image_embedding, image_mask)
        outfit_center_loss = outfit_center_loss_dict['all']
        # Negated here because items labeled 1 should score higher.
        outfit_comp_metrics = -outfit_center_loss_dict['every']

        # Loss between outfit center vectors (masked mean over items).
        centers = (image_mask.unsqueeze(2).float() * image_embedding).sum(dim=1) / image_mask.sum(dim=1, keepdim=True).float()
        centers = centers.unsqueeze(dim=1)
        inter_center_product_loss = self.inter_center_product_loss(centers)

        # [name, weight, value] — the weighted sum below is the total loss.
        loss_structure = [
            ['norm_text_loss', const.WEIGHT_NORM_TEXT, norm_text_loss],
            ['norm_image_loss', const.WEIGHT_NORM_IMAGE, norm_image_loss],
            ['image_text_loss', const.WEIGHT_IMAGE_TEXT, image_text_loss],
            ['outfit_center_loss', const.WEIGHT_OUTFIT_CENTER, outfit_center_loss],
            ['inter_center_product_loss', const.WEIGHT_INTER_PRODUCT, inter_center_product_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])

        return {
            'structure': loss_structure,
            'all': all_loss,
            'outfit_comp_metrics': outfit_comp_metrics,
        }

    def fitb_ans(self, sample):
        """Answer a fill-in-the-blank question.

        Picks the candidate whose embedding is closest (squared L2) to the
        mean of the partial outfit's image embeddings.

        Args:
            sample: dict with 'images' (the partial outfit) and
                'answer_images' (the candidate items).

        Returns:
            int index of the chosen candidate.
        """
        image_embedding = self.image_embedding({'images': sample['images']})
        answer_embedding = self.image_embedding({'images': sample['answer_images']})
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        # BUGFIX: read the feature dim from answer_embedding itself (the
        # original used image_embedding.shape[-1], which only works when
        # both embeddings happen to share the same last dim).
        answer_embedding = answer_embedding.reshape(-1, answer_embedding.shape[-1])
        center = image_embedding.mean(dim=0, keepdim=True)  # this network's center is a plain mean
        diff = answer_embedding - center
        diff = (diff ** 2).sum(dim=1)
        ans_index = diff.argmin().item()
        return ans_index

    def get_embedding(self, sample):
        """Return normalized (and optionally scaled) image embeddings.

        Args:
            sample: dict whose 'images' holds one outfit's images
                (presumably 3 x 224 x 224 each — TODO confirm).

        Returns:
            2-D tensor of shape (num_items, dim).
        """
        # Add the leading batch dim the image encoder expects.
        sample['images'] = sample['images'].unsqueeze(dim=0)
        image_embedding = self.image_embedding(sample)
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        return image_embedding
