import torch
from src.base_networks import *
from torch import nn
from src import const
import os
import torchvision
from torch.nn import functional as F
import numpy as np


class NewInterCenterLoss(object):
    '''
    Input:
        centers: [batch_size, embed_size]
    Output:
        mean of c_i . c_j over all pairs with i != j; if the centers
        should be far apart, smaller values of this loss are better.
    '''
    def __call__(self, centers):
        # Gram matrix of pairwise inner products; drop the diagonal
        # (self-similarities) before averaging over the n*(n-1) ordered pairs.
        gram = centers @ centers.t()
        off_diag = gram - torch.diag(gram.diag())
        num_centers = off_diag.shape[0]
        pair_count = num_centers * num_centers - num_centers
        return off_diag.sum() / pair_count

class NewOutfitIntraItemLoss(object):
    '''
    Input:
        image_embedding: [batch_size, item_pad_num, embed_size]
        image_mask: [batch_size, item_pad_num]
    Output:
        Within one outfit, the mean of item_i . item_j over pairs i != j.
        We want each outfit to contract internally, so this sum should be
        as large as possible; the returned loss is therefore its negation.
    '''
    def __init__(self):
        # Pre-build an identity mask so self-products (i == j) can be
        # excluded from the pairwise sums.
        n = const.OUTFIT_ITEM_PAD_NUM
        self.eye = torch.eye(n, n).float().to(const.device)

    def __call__(self, image_embedding, image_mask):
        image_mask = image_mask.unsqueeze(dim=2)
        # Pairwise inner products within each outfit: [b, n, n].
        scores = torch.bmm(image_embedding, image_embedding.transpose(1, 2))
        # valid[b, i, j] == 1 iff both items are real (non-padded)...
        valid = torch.bmm(image_mask, image_mask.transpose(1, 2))
        # ...and i != j.
        valid = valid * (1 - self.eye.unsqueeze(dim=0))
        masked_scores = scores * valid
        loss = masked_scores.sum() / valid.sum()
        loss = -loss
        # BUG FIX: the per-outfit metric previously averaged the *unmasked*
        # scores, so diagonal self-products and padded items leaked into it.
        # Apply the same validity mask the loss uses.
        every = (masked_scores.sum(dim=1).sum(dim=1) / valid.sum(dim=1).sum(dim=1)).detach().cpu()
        return {
            'loss': loss,
            'every': every,
        }

class NewInterOutfitItemLoss(object):
    '''
    Input:
        image_embedding: [batch_size, item_pad_num, embed_size]
        image_mask: [batch_size, item_pad_num]
    Output:
        Items from *different* outfits, item_outfit1_i . item_outfit2_j
        (i may equal j), should stay far apart, so smaller is better.
    '''

    def __init__(self):
        batch = max(const.BATCH_SIZE, const.VAL_BATCH_SIZE)
        pad = const.OUTFIT_ITEM_PAD_NUM
        # Block-diagonal mask marking within-outfit positions: those pairs
        # must not be counted here, so we use this special "eye" matrix.
        block_diag = np.kron(np.eye(batch), np.ones((pad, pad)))
        self.special_eyes = torch.tensor(block_diag).float().to(const.device)

    def __call__(self, image_embedding, image_mask):
        # Flatten outfits so every item is a row: [b * pad, embed].
        flat_emb = image_embedding.reshape(-1, image_embedding.shape[-1])
        flat_mask = image_mask.reshape(-1, 1)
        scores = flat_emb @ flat_emb.t()
        valid = flat_mask @ flat_mask.t()
        # Keep only cross-outfit pairs of real (non-padded) items.
        n = scores.shape[0]
        valid = valid * (1 - self.special_eyes[:n, :n])
        return (scores * valid).sum() / valid.sum()


class PureImageNetwork(ModuleWithAttr):
    '''
    Uses image information only.
    '''
    def __init__(self, pretrained_embeddings=None):
        super(PureImageNetwork, self).__init__()
        self.image_embedding = ImageEmbedding()
        self.new_inter_center_loss = NewInterCenterLoss()
        self.new_outfit_intra_item_loss = NewOutfitIntraItemLoss()
        self.new_inter_outfit_item_loss = NewInterOutfitItemLoss()

    def forward(self, sample):
        """Embed the outfit images; return embeddings plus the item mask."""
        return {
            'image_embedding': self.image_embedding(sample),
            'image_mask': sample['image_mask'],
        }

    def cal_loss(self, sample, output):
        """Weighted combination of the three losses plus a per-outfit metric."""
        embeddings = output['image_embedding']
        mask = output['image_mask']
        # L2-normalize item embeddings here; other normalizations could be tried.
        embeddings = embeddings / (torch.norm(embeddings, p=2, dim=2, keepdim=True) + 1e-9)

        # Outfit centers: masked mean of item embeddings, then L2-normalized.
        centers = (mask.unsqueeze(2).float() * embeddings).sum(dim=1) / mask.sum(dim=1, keepdim=True).float()
        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)

        # Optional global scale factor.
        if hasattr(const, 'SCALE_FACTOR'):
            centers = const.SCALE_FACTOR * centers
            embeddings = const.SCALE_FACTOR * embeddings

        inter_center = self.new_inter_center_loss(centers)
        intra_item = self.new_outfit_intra_item_loss(embeddings, mask)
        inter_item = self.new_inter_outfit_item_loss(embeddings, mask)

        loss_structure = [
            ['new_inter_center_loss', const.WEIGHT_INTER_CENTER, inter_center],
            ['new_outfit_intra_item_loss', const.WEIGHT_OUTFIT_INTRA_ITEM, intra_item['loss']],
            ['new_inter_outfit_item_loss', const.WEIGHT_INTER_OUTFIT_ITEM, inter_item],
        ]

        all_loss = sum(weight * value for _, weight, value in loss_structure)

        return {
            'structure': loss_structure,
            'all': all_loss,
            'outfit_comp_metrics': intra_item['every'],
        }

    def fitb_ans(self, sample):
        """Fill-in-the-blank: index of the answer item closest (by mean inner
        product) to the outfit's items."""
        outfit_emb = self.image_embedding({'images': sample['images']})
        answer_emb = self.image_embedding({'images': sample['answer_images']})
        outfit_emb = outfit_emb / (torch.norm(outfit_emb, p=2, dim=2, keepdim=True) + 1e-9)
        answer_emb = answer_emb / (torch.norm(answer_emb, p=2, dim=2, keepdim=True) + 1e-9)
        # Optional global scale factor.
        if hasattr(const, 'SCALE_FACTOR'):
            outfit_emb = const.SCALE_FACTOR * outfit_emb
            answer_emb = const.SCALE_FACTOR * answer_emb
        embed_size = outfit_emb.shape[-1]
        outfit_emb = outfit_emb.reshape(-1, embed_size)
        answer_emb = answer_emb.reshape(-1, embed_size)

        # Larger inner product means closer; average over the outfit's items.
        scores = outfit_emb.matmul(answer_emb.t()).mean(dim=0)
        return torch.argmax(scores).item()

    def get_embedding(self, sample):
        """Embed a single image (3 x 224 x 224) and return [1, embed_size]."""
        sample['images'] = sample['images'].unsqueeze(dim=0)
        emb = self.image_embedding(sample)
        emb = emb / (torch.norm(emb, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            emb = const.SCALE_FACTOR * emb
        return emb.reshape(-1, emb.shape[-1])
    
    
class NewIntraOutfitCenterLoss(object):
    '''
    Input:
        image_embedding: [batch_size, N, embed_size] (already normalized and
            multiplied by the scale factor)
        centers: [batch_size, embed_size] (already normalized and scaled;
            how they were obtained is not this class's concern)
        image_mask: [batch_size, N]
    Output:
        Distance-like loss between valid item embeddings and their centers
        (no sqrt needed — the inner product is already second order).
        Since everything is normalized, distance is equivalent to the inner
        product; bigger is better, so the loss is negated.
    '''
    def __call__(self, image_embedding, centers, image_mask):
        expanded_centers = centers.unsqueeze(dim=1)
        mask3 = image_mask.unsqueeze(dim=2)
        # Elementwise products between each (masked) item and its center.
        products = expanded_centers * (image_embedding * mask3)
        loss = -(products.sum() / mask3.sum().float())
        # Per-outfit mean item-to-center similarity, moved to CPU as a metric.
        per_outfit = products.sum(dim=2).sum(dim=1) / mask3.sum(dim=1).squeeze().float()
        return {
            'loss': loss,
            'every': per_outfit.detach().cpu(),
        }
    
class PureImageNetworkV2(ModuleWithAttr):
    '''
    Uses image information only. V2 of PureImageNetwork: adds the
    intra-outfit center loss pulling each item towards its outfit center.
    '''
    def __init__(self, pretrained_embeddings=None):
        # NOTE(review): pretrained_embeddings is accepted but never used —
        # presumably kept for interface parity with sibling networks; confirm.
        super(PureImageNetworkV2, self).__init__()
        self.image_embedding = ImageEmbedding()
        self.new_inter_center_loss = NewInterCenterLoss()
        self.new_outfit_intra_item_loss = NewOutfitIntraItemLoss()
        self.new_inter_outfit_item_loss  = NewInterOutfitItemLoss()
        self.new_intra_outfit_center_loss =  NewIntraOutfitCenterLoss()

    def forward(self, sample):
        """Embed the outfit images; return embeddings plus the item mask."""
        image_embedding = self.image_embedding(sample)
        image_mask = sample['image_mask']
        return {
            'image_embedding': image_embedding,
            'image_mask': image_mask,
        }

    def cal_loss(self, sample, output):
        """Combine the four losses into a weighted sum.

        Returns a dict with the per-loss structure, the total loss, and a
        per-outfit compatibility metric.
        """
        image_embedding = output['image_embedding']
        image_mask = output['image_mask']
        # L2-normalize item embeddings here; other normalizations could be tried.
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        # Outfit centers: masked mean of item embeddings.
        centers = (image_mask.unsqueeze(2).float() * image_embedding).sum(dim=1) / image_mask.sum(dim=1, keepdim=True).float()
        # L2-normalize the centers as well.
        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)

        # Optional global scale factor.
        if hasattr(const, 'SCALE_FACTOR'):
            centers = const.SCALE_FACTOR * centers
            image_embedding = const.SCALE_FACTOR * image_embedding

        new_inter_center_loss = self.new_inter_center_loss(centers)
        new_outfit_intra_item_dict = self.new_outfit_intra_item_loss(image_embedding, image_mask)
        new_outfit_intra_item_loss = new_outfit_intra_item_dict['loss']
        new_inter_outfit_item_loss = self.new_inter_outfit_item_loss(image_embedding, image_mask)
        new_intra_outfit_center_dict = self.new_intra_outfit_center_loss(image_embedding, centers, image_mask)
        new_intra_outfit_center_loss = new_intra_outfit_center_dict['loss']
        
        # Which metric is better to report here?
        # outfit_comp_metrics = new_outfit_intra_item_dict['every']
        outfit_comp_metrics = new_intra_outfit_center_dict['every']

        loss_structure = [
            ['new_inter_center_loss', const.WEIGHT_INTER_CENTER, new_inter_center_loss],
            ['new_outfit_intra_item_loss', const.WEIGHT_OUTFIT_INTRA_ITEM, new_outfit_intra_item_loss],
            ['new_inter_outfit_item_loss', const.WEIGHT_INTER_OUTFIT_ITEM, new_inter_outfit_item_loss],
            ['new_intra_outfit_center_loss', const.WEIGHT_INTRA_OUTFIT_CENTER, new_intra_outfit_center_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])

        return {
            'structure': loss_structure,
            'all': all_loss,
            'outfit_comp_metrics': outfit_comp_metrics,
        }

    def fitb_ans(self, sample):
        """Fill-in-the-blank: pick the answer item closest to the outfit,
        either via the outfit center or via the mean item inner product."""
        image_embedding = self.image_embedding({'images': sample['images']})
        answer_embedding = self.image_embedding({'images': sample['answer_images']})
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        answer_embedding = answer_embedding / (torch.norm(answer_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        # Unmasked mean here: FITB questions are presumably not padded — confirm.
        centers = image_embedding.mean(dim=1, keepdim=False)
        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)
        # Optional global scale factor.
        if hasattr(const, 'SCALE_FACTOR'):
            centers = const.SCALE_FACTOR * centers
            image_embedding = const.SCALE_FACTOR * image_embedding
            answer_embedding = const.SCALE_FACTOR * answer_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        answer_embedding = answer_embedding.reshape(-1, image_embedding.shape[-1])
        if hasattr(const, 'FITB_USE_CENTER') and const.FITB_USE_CENTER is True:
            # Score each answer against the outfit center.
            scores = centers.matmul(answer_embedding.transpose(0, 1))
            ans_index = torch.argmax(scores).item()
        else:
            # Score each answer against every item; larger inner product = closer.
            scores = image_embedding.matmul(answer_embedding.transpose(0, 1))
            scores = scores.mean(dim=0)
            ans_index = torch.argmax(scores).item()
        return ans_index
    
    def get_embedding(self, sample):
        """Embed a single image (3 x 224 x 224) and return [1, embed_size]."""
        sample['images'] = sample['images'].unsqueeze(dim=0)
        image_embedding = self.image_embedding(sample)
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        return image_embedding


class OutfitTripletLoss(object):
    '''
    Input:
        image_embedding: [2, outfit_item_pad, embed_size]
        image_mask: [2, outfit_item_pad]
    Output:
        A triplet loss built from this pair of outfits: same-outfit item
        pairs (anchor, positive) should score higher than cross-outfit
        pairs (anchor, negative) by at least const.VOT_MARGIN.
    '''
    
    def __call__(self, image_embedding, image_mask, metrics=None):
        assert image_embedding.shape[0] == 2
        if metrics is None:
            # Vectorized path: plain inner product as the similarity.
            embedding1 = image_embedding[0]
            embedding2 = image_embedding[1]
            valid1 = image_mask[0, :].unsqueeze(dim=1)
            valid2 = image_mask[1, :].unsqueeze(dim=1)
            scores_same = embedding1.matmul(embedding1.transpose(0, 1))
            scores_diff = embedding1.matmul(embedding2.transpose(0, 1))
            scores = F.relu(scores_diff - scores_same + const.VOT_MARGIN)
            # (i, j) is valid iff items 1[i], 1[j] and 2[j] are all real.
            valid = valid1.matmul(valid1.transpose(0, 1))
            valid = valid * (valid1.matmul(valid2.transpose(0, 1)))
            valid = valid - torch.diag(valid.diag())  # the diagonal (i == j) is not a real triplet
            loss = (scores * valid).sum() / valid.sum()
            return loss
        else:
            # Learned-metric path: `metrics` maps an elementwise product to a
            # scalar similarity (like the inner product — larger is better).
            embedding1 = image_embedding[0]
            embedding2 = image_embedding[1]
            valid1 = image_mask[0, :].detach().cpu().numpy().tolist()
            valid2 = image_mask[1, :].detach().cpu().numpy().tolist()

            loss = []
            valid_cnt = 0.
            for i in range(const.OUTFIT_ITEM_PAD_NUM):
                for j in range(const.OUTFIT_ITEM_PAD_NUM):
                    # BUG FIX: the negative item is outfit 2's item j, so its
                    # validity must be checked with valid2[j] (was valid2[i]).
                    # Also check valid1[j] for the positive item, matching the
                    # validity mask of the vectorized branch above.
                    if i == j or int(valid1[i]) == 0 or int(valid1[j]) == 0 or int(valid2[j]) == 0:
                        continue
                    loss.append(F.relu(
                        metrics(embedding1[i] * embedding2[j]) - metrics(embedding1[i] * embedding1[j]) + const.VOT_MARGIN,
                    ))
                    valid_cnt += 1.
            loss = sum(loss)
            loss = loss / valid_cnt
            return loss

class VOTNetworks(ModuleWithAttr):
    '''
    Uses image information only; trained with the outfit triplet loss (VOT).
    '''
    def __init__(self, pretrained_embeddings=None):
        # NOTE(review): pretrained_embeddings is accepted but never used —
        # presumably kept for interface parity with sibling networks; confirm.
        super(VOTNetworks, self).__init__()
        self.image_embedding = ImageEmbedding()
        self.outfit_triplet_loss = OutfitTripletLoss()
        # Compatibility-scoring loss chosen once at construction time.
        if const.VOT_SCORE_USE_CENTER is False:
            self.score_loss = NewOutfitIntraItemLoss()
        else:
            self.score_loss = NewIntraOutfitCenterLoss()
        # NOTE: this metric must behave like the inner product — larger is better!
        if const.GENERAL_METRICS is True:
            print('USE GENERAL METRICS')
            self.metric_branch = nn.Linear(const.IMAGE_EMBED_SIZE, 1, bias=False)
            # initialize with an even weighting across all dimensions
        #     weight = torch.zeros(1,const.IMAGE_EMBED_SIZE)/float(const.IMAGE_EMBED_SIZE)
            weight = torch.ones(1,const.IMAGE_EMBED_SIZE) # originally all zeros, which seemed unreasonable; changed to ones for now
            self.metric_branch.weight = nn.Parameter(weight)
        else:
            self.metric_branch = None

    def forward(self, sample):
        """Embed the outfit images; return embeddings plus the item mask."""
        image_embedding = self.image_embedding(sample)
        image_mask = sample['image_mask']
        output = {
            'image_embedding': image_embedding,
            'image_mask': image_mask,
        }
        return output

    def cal_loss(self, sample, output):
        """Weighted outfit triplet loss over the (normalized, scaled) embeddings."""
        image_embedding = output['image_embedding']
        image_mask = output['image_mask']
        # Same normalization and scale factor as the other networks.
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding

        outfit_triplet_loss = self.outfit_triplet_loss(image_embedding, image_mask, self.metric_branch)

        loss_structure = [
            ['outfit_triplet_loss', const.WEIGHT_OUTFIT_TRIPLET, outfit_triplet_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])

        return {
            'structure': loss_structure,
            'all': all_loss,
        }

    def score_compatibility(self, output):  # if this method exists, it is used for scoring
        """Per-outfit compatibility metric via the configured score loss."""
        image_embedding = output['image_embedding']
        image_mask = output['image_mask']
        # Same normalization and scale factor as in training.
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        centers = (image_mask.unsqueeze(2).float() * image_embedding).sum(dim=1) / image_mask.sum(dim=1, keepdim=True).float()
        # L2-normalize the centers as well.
        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)
        # Optional global scale factor.
        if hasattr(const, 'SCALE_FACTOR'):
            centers = const.SCALE_FACTOR * centers
            image_embedding = const.SCALE_FACTOR * image_embedding
        if const.VOT_SCORE_USE_CENTER is False:
            # NewOutfitIntraItemLoss signature: (embeddings, mask).
            loss = self.score_loss(image_embedding, image_mask)
            return loss['every']
        else:
            # NewIntraOutfitCenterLoss signature: (embeddings, centers, mask).
            loss = self.score_loss(image_embedding, centers, image_mask)
            return loss['every']
    
    def fitb_ans(self, sample):
        """Fill-in-the-blank: pick the answer item with the highest similarity,
        using the learned metric when GENERAL_METRICS is enabled."""
        image_embedding = self.image_embedding({'images': sample['images']})
        answer_embedding = self.image_embedding({'images': sample['answer_images']})
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        answer_embedding = answer_embedding / (torch.norm(answer_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        # Unmasked mean here: FITB questions are presumably not padded — confirm.
        centers = image_embedding.mean(dim=1, keepdim=False)
        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)
        # Optional global scale factor.
        if hasattr(const, 'SCALE_FACTOR'):
            centers = const.SCALE_FACTOR * centers
            image_embedding = const.SCALE_FACTOR * image_embedding
            answer_embedding = const.SCALE_FACTOR * answer_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        answer_embedding = answer_embedding.reshape(-1, image_embedding.shape[-1])
        if const.GENERAL_METRICS is False:
            if const.VOT_FITB_USE_CENTER is True:
                # Score each answer against the outfit center.
                scores = centers.matmul(answer_embedding.transpose(0, 1))
                ans_index = torch.argmax(scores).item()
            else:
                scores = image_embedding.matmul(answer_embedding.transpose(0, 1))
                scores = scores.mean(dim=0)  # larger inner product means closer
                ans_index = torch.argmax(scores).item()
        else:        
            # Learned metric: score each answer by summing metric_branch over
            # its elementwise products with every outfit item.
            ans_scores = []
            for i in range(answer_embedding.shape[0]):
                score = 0
                for j in range(image_embedding.shape[0]):
                    # This metric behaves like the inner product — larger is better.
                    score += self.metric_branch(answer_embedding[i] * image_embedding[j]).item()
                ans_scores.append(score)
            ans_index = np.argmax(ans_scores)
        return ans_index
    
    def get_embedding(self, sample):
        """Embed a single image (3 x 224 x 224) and return [1, embed_size]."""
        sample['images'] = sample['images'].unsqueeze(dim=0)
        image_embedding = self.image_embedding(sample)
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            image_embedding = const.SCALE_FACTOR * image_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        return image_embedding