import torch
from src.base_networks import *
from src.pure_image_networks import *
from torch import nn
from src import const
import os
import torchvision
from torch.nn import functional as F
import numpy as np

class NewInnerProductContrastiveLoss(object):
    '''
    Bidirectional max-margin (hinge) contrastive loss over inner-product
    similarity scores.  The margin is already folded in, so lower is better.
    '''

    def __init__(self):
        # NOTE: the identity mask is now built lazily in __call__ from the
        # actual score-matrix size.  The old precomputed eye (sized from
        # const.BATCH_SIZE * const.OUTFIT_ITEM_PAD_NUM) silently produced a
        # too-small slice whenever the real flattened batch grew past it.
        pass

    def __call__(self, embedding1, embedding2, mask1, mask2, margin=None):
        '''
        embedding1, embedding2: (n, d) paired embeddings — row i of each is a
            positive pair; every other row pairing is treated as a negative.
        mask1, mask2: (n,) 0/1 float validity flags for the rows.
        margin: hinge margin; defaults to const.EMBED_MARGIN for backward
            compatibility with existing callers.
        Returns a scalar loss tensor.
        '''
        if margin is None:
            margin = const.EMBED_MARGIN
        scores = embedding1.matmul(embedding2.transpose(0, 1))  # (n, n) similarities
        diagonal = scores.diag().unsqueeze(1)                   # positive-pair scores
        # A score cell is usable only when both of its contributing rows are valid.
        scores_valid = mask1.unsqueeze(1).matmul(mask2.unsqueeze(0))
        diagonal_valid = scores_valid.diag().unsqueeze(1)
        # relu(x) == max(0, x): hinge in both retrieval directions.
        loss_a = F.relu(margin - diagonal + scores)                  # 1 -> 2 direction
        loss_b = F.relu(margin - diagonal.transpose(0, 1) + scores)  # 2 -> 1 direction
        n = scores.shape[0]
        eye = torch.eye(n, device=scores.device, dtype=scores.dtype)
        off_diag = 1. - eye  # diagonal cells are the positives, never negatives
        valid_a = (scores_valid * diagonal_valid).float() * off_diag
        valid_b = (scores_valid * diagonal_valid.transpose(0, 1)).float() * off_diag
        # clamp avoids 0/0 -> NaN when a batch happens to have no valid negatives.
        loss_a = (loss_a * valid_a).sum() / valid_a.sum().clamp(min=1e-12)
        loss_b = (loss_b * valid_b).sum() / valid_b.sum().clamp(min=1e-12)
        return loss_a + loss_b

class ImageTextNetworkV1(ModuleWithAttr):
    '''
    V1 joint image + text embedding network: three embedding branches
    (image, image-text, text) plus the contrastive/compatibility loss
    modules used to train them.  (The original note said "image
    information only", but the class clearly embeds text as well.)
    '''
    def __init__(self, pretrained_embeddings=None):
        # pretrained_embeddings: optional word-embedding weights forwarded to
        # AverageTextEmbedding (None -> that module's own default behavior).
        super(ImageTextNetworkV1, self).__init__()
        self.image_embedding = ImageEmbedding()
        # Second image branch, projected into the joint image-text space.
        self.image_text_embedding = ImageEmbedding(const.IMAGE_TEXT_EMBED_SIZE)
        self.text_embedding = AverageTextEmbedding(pretrained_embeddings)
        # Loss terms; they are weighted and summed in cal_loss.
        self.new_inter_center_loss = NewInterCenterLoss()
        self.new_outfit_intra_item_loss = NewOutfitIntraItemLoss()
        self.new_inter_outfit_item_loss  = NewInterOutfitItemLoss()
        self.new_intra_outfit_center_loss =  NewIntraOutfitCenterLoss()
        self.new_image_text_product_contrastive_loss = NewInnerProductContrastiveLoss()

    def forward(self, sample):
        '''
        Run the three embedding branches on a batch and bundle their
        outputs together with the validity masks from the sample.

        sample: batch dict; must carry 'word_mask' and 'image_mask' plus
            whatever the embedding sub-modules read from it.
        Returns a dict with the three embeddings and both masks.
        '''
        # Dict-literal values evaluate left-to-right, so the sub-modules run
        # in the same order as before: text, image, image-text.
        return {
            'text_embedding': self.text_embedding(sample),
            'image_embedding': self.image_embedding(sample),
            'image_text_embedding': self.image_text_embedding(sample),
            'text_mask': sample['word_mask'],
            'image_mask': sample['image_mask'],
        }

    def cal_loss(self, sample, output):
        '''
        Build the weighted training loss from the embeddings in `output`.

        sample: the input batch dict (currently unused here; the method
            works purely off `output`, kept for interface symmetry).
        output: dict returned by forward().
        Returns a dict with:
            'structure': list of [loss_name, weight, loss_tensor] triples,
            'all': weighted sum of all loss terms (scalar tensor),
            'outfit_comp_metrics': per-outfit metrics from the
                intra-outfit-center loss.
        '''
        image_embedding = output['image_embedding']
        image_text_embedding = output['image_text_embedding']
        text_embedding = output['text_embedding']
        image_mask = output['image_mask']
        text_mask = output['text_mask']

        # L2-normalize over the embedding axis (dim=2 — so the embeddings are
        # presumably (batch, items, dim); TODO confirm against the embedding
        # modules).  The 1e-9 guards zero-norm rows.  Other normalization
        # schemes could be considered here.
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        image_text_embedding = image_text_embedding / (torch.norm(image_text_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        text_embedding = text_embedding / (torch.norm(text_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        # Outfit centers: masked mean of the item image embeddings per outfit,
        # then re-normalized onto the unit sphere.
        centers = (image_mask.unsqueeze(2).float() * image_embedding).sum(dim=1) / image_mask.sum(dim=1, keepdim=True).float()
        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)
        # Optional global scale factor, applied only when const defines it.
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding
            image_text_embedding = const.SCALE_FACTOR * image_text_embedding
            text_embedding = const.SCALE_FACTOR * text_embedding

        # Cross-modal contrastive loss between the text and image-text
        # embeddings, with items flattened to one row each.
        new_image_text_product_contrastive_loss = self.new_image_text_product_contrastive_loss(
            text_embedding.reshape(-1, text_embedding.shape[2]),
            image_text_embedding.reshape(-1, image_text_embedding.shape[2]),
            text_mask.reshape(-1),
            image_mask.reshape(-1),

        )

        # Losses on image_embedding, mainly used for outfit compatibility.
        new_inter_center_loss = self.new_inter_center_loss(centers)
        new_outfit_intra_item_dict = self.new_outfit_intra_item_loss(image_embedding, image_mask)
        new_outfit_intra_item_loss = new_outfit_intra_item_dict['loss']
        new_inter_outfit_item_loss = self.new_inter_outfit_item_loss(image_embedding, image_mask)
        new_intra_outfit_center_dict = self.new_intra_outfit_center_loss(image_embedding, centers, image_mask)
        new_intra_outfit_center_loss = new_intra_outfit_center_dict['loss']

        # Which per-outfit metric is better here is still an open question;
        # the intra-item alternative is kept below for reference:
        #  outfit_comp_metrics = new_outfit_intra_item_dict['every']
        outfit_comp_metrics = new_intra_outfit_center_dict['every']

        # [name, weight, value] triples; weights come from const.
        loss_structure = [
            ['new_inter_center_loss', const.WEIGHT_INTER_CENTER, new_inter_center_loss],
            ['new_outfit_intra_item_loss', const.WEIGHT_OUTFIT_INTRA_ITEM, new_outfit_intra_item_loss],
            ['new_inter_outfit_item_loss', const.WEIGHT_INTER_OUTFIT_ITEM, new_inter_outfit_item_loss],
            ['new_intra_outfit_center_loss', const.WEIGHT_INTRA_OUTFIT_CENTER, new_intra_outfit_center_loss],
            ['new_image_text_product_contrastive_loss', const.WEIGHT_IMAGE_TEXT_PRODUCT, new_image_text_product_contrastive_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])

        return {
            'structure': loss_structure,
            'all': all_loss,
            'outfit_comp_metrics': outfit_comp_metrics,
        }

    def fitb_ans(self, sample):
        '''
        Answer a fill-in-the-blank question: pick the candidate whose
        normalized image embedding has the largest mean inner product with
        the embeddings of the partial-outfit images.

        sample: dict with 'images' (partial-outfit images) and
            'answer_images' (candidate images); exact tensor shapes are
            defined by ImageEmbedding — assumed to yield a 3-D
            (batch, items, dim) embedding, as dim=2 norms below imply.
        Returns the int index of the best-scoring candidate.
        '''
        image_embedding = self.image_embedding({'images': sample['images']})
        answer_embedding = self.image_embedding({'images': sample['answer_images']})
        # L2-normalize along the embedding dimension (eps guards zero norms),
        # mirroring the normalization in cal_loss.
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        answer_embedding = answer_embedding / (torch.norm(answer_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        # Optional global scale factor, applied only when const defines it.
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding
            answer_embedding = const.SCALE_FACTOR * answer_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        # FIX: previously reshaped with image_embedding's last dim; use the
        # answer tensor's own embedding size.  The values coincide today, but
        # the old form would break silently if the two ever diverged.
        answer_embedding = answer_embedding.reshape(-1, answer_embedding.shape[-1])
        scores = image_embedding.matmul(answer_embedding.transpose(0, 1))
        scores = scores.mean(dim=0)  # larger inner product = closer match
        ans_index = torch.argmax(scores).item()
        return ans_index
    
    def get_embedding(self, sample):
        '''
        Embed a single item image (3 x 224 x 224) and return its L2-normalized
        embedding flattened to (rows, embed_dim).

        FIX: the previous version wrote the unsqueezed tensor back into the
        caller's `sample` dict, mutating the caller's data as a side effect.
        We now build a local batch dict instead (fitb_ans shows that
        image_embedding only needs the 'images' key).
        '''
        # Add a batch dimension without touching the caller's dict.
        batch = {'images': sample['images'].unsqueeze(dim=0)}
        image_embedding = self.image_embedding(batch)
        # Same normalization / optional scaling as cal_loss and fitb_ans.
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        return image_embedding