import torch
from src.base_networks import *
from torch import nn
from src import const
import os
import torchvision
from torch.nn import functional as F
import numpy as np
from src.resnet import resnet18
from IPython import embed

class FieldAwareEmbedding(nn.Module):
    """Image embedding with per-category ("field") projections.

    A backbone CNN maps each image to a shared general embedding of size
    ``const.IMAGE_EMBED_SIZE``; one linear projection per category then maps
    that general embedding into a per-field sub-embedding used for
    compatibility distances.
    """

    def __init__(self):
        super(FieldAwareEmbedding, self).__init__()
        # The general embedding is split evenly across categories, so it
        # must divide exactly.
        assert const.IMAGE_EMBED_SIZE % const.MAX_CATEGORY_NUM == 0
        self.FIELD_EMBED_SIZE = const.IMAGE_EMBED_SIZE // const.MAX_CATEGORY_NUM
        self.general_embedding = resnet18(pretrained=True, embedding_size=const.IMAGE_EMBED_SIZE)
        if const.LEARNED_FIELD_EMBED is True:
            # One trainable projection per category.
            self.mask_fcs = nn.ModuleList([
                nn.Linear(
                    const.IMAGE_EMBED_SIZE,
                    self.FIELD_EMBED_SIZE,
                    bias=const.LEARNED_FIELD_BIAS,
                )
                for _ in range(const.MAX_CATEGORY_NUM)
            ])
        else:
            # Fixed (non-trainable) projections: field i simply selects the
            # i-th contiguous slice of the general embedding via an
            # identity-block weight matrix.
            mask_fcs = []
            for i in range(const.MAX_CATEGORY_NUM):
                weight = np.zeros((self.FIELD_EMBED_SIZE, const.IMAGE_EMBED_SIZE))
                weight[:, i * self.FIELD_EMBED_SIZE:(i + 1) * self.FIELD_EMBED_SIZE] = np.eye(self.FIELD_EMBED_SIZE)
                fc = nn.Linear(const.IMAGE_EMBED_SIZE, self.FIELD_EMBED_SIZE, bias=False)
                fc.weight = torch.nn.Parameter(torch.Tensor(weight), requires_grad=False)
                mask_fcs.append(fc)
            self.mask_fcs = nn.ModuleList(mask_fcs)

    def forward(self, images):
        """Return the general embedding for a batch of images.

        images: [N, 3, h, w] -> returns [N, const.IMAGE_EMBED_SIZE].
        """
        x = self.general_embedding(images)
        # BUGFIX: previously only hasattr() was tested, so setting
        # const.NORM_GENERAL = False still triggered normalization; now the
        # flag's value is honored (consistent with NORM_PART below).
        if getattr(const, "NORM_GENERAL", False):
            x = x / (torch.norm(x, p=2, dim=1) + 1e-10).unsqueeze(1)
        return x

    def cal_dist(self, general_emb1, general_emb2, types1, types2, metrics=None):
        """Field-aware distance between two aligned batches of embeddings.

        general_emb1, general_emb2: [N, IMAGE_EMBED_SIZE]
        types1, types2: [N] integer category ids (tensors)
        metrics: optional module applied to the elementwise product of the
            two field embeddings; when None, Euclidean distance is used.
        Returns a tensor of per-pair distances/scores.
        """
        no_swap = getattr(const, 'NO_SWAP_TYPE', None) is True
        masked1 = []
        masked2 = []
        for i in range(general_emb1.shape[0]):
            type1 = int(types1[i].item())
            type2 = int(types2[i].item())
            if no_swap:
                # Do not swap types: project each item with its OWN
                # category's field.
                masked_emb1 = self.mask_fcs[type1](general_emb1[i, :])
                masked_emb2 = self.mask_fcs[type2](general_emb2[i, :])
            else:
                # Cross projection: each item is viewed in the PARTNER's field.
                masked_emb1 = self.mask_fcs[type2](general_emb1[i, :])  # type2 field
                masked_emb2 = self.mask_fcs[type1](general_emb2[i, :])  # type1 field
            masked1.append(masked_emb1.unsqueeze(0))
            masked2.append(masked_emb2.unsqueeze(0))
        masked1 = torch.cat(masked1)
        masked2 = torch.cat(masked2)
        if getattr(const, "NORM_PART", False):
            masked1 = masked1 / (torch.norm(masked1, p=2, dim=1) + 1e-10).unsqueeze(1)
            masked2 = masked2 / (torch.norm(masked2, p=2, dim=1) + 1e-10).unsqueeze(1)
        if metrics is None:
            dist = F.pairwise_distance(masked1, masked2, p=2)
        else:
            dist = metrics(masked1 * masked2)
        return dist

    def get_mask_L1_norm(self):
        """Mean per-element L1 norm of the field projection weights.

        Used as a sparsity regularizer on the masks.
        """
        total = sum(fc.weight.norm(1) for fc in self.mask_fcs)
        total = total / const.MAX_CATEGORY_NUM
        # Normalize by the number of weight elements in one projection.
        return total / self.mask_fcs[0].weight.numel()

class ExpTripletNetwork(ModuleWithAttr):
    """Triplet network over field-aware image embeddings.

    Trains a FieldAwareEmbedding with a margin ranking (triplet) loss; pair
    distances are either Euclidean or produced by a learned linear metric
    (``metric_branch``) when ``const.LEARNED_METRICS`` is set.
    """

    def __init__(self, pretrained_embeddings=None):
        super(ExpTripletNetwork, self).__init__()
        self.fw_embedding = FieldAwareEmbedding()
        self.metric_branch = None
        if const.LEARNED_METRICS:
            n = self.fw_embedding.FIELD_EMBED_SIZE
            self.metric_branch = nn.Linear(n, 1, bias=False)
            # NOTE: this weight was originally initialized to zeros, which
            # is definitely wrong (zero output and zero gradients through the
            # product); ones is the intended initialization.
            weight = torch.ones(1, n)
            if hasattr(const, 'LEARNED_METRICS_INIT'):
                weight.fill_(const.LEARNED_METRICS_INIT)

            self.metric_branch.weight = nn.Parameter(weight)
        self.criterion = nn.MarginRankingLoss(margin=const.TRIPLET_MARGIN)

    def forward(self, sample):
        """Compute general embeddings for the three items of a triplet.

        sample: an 18-tuple of (raw_image, image, text_ids, text_mask,
            embedding_divider, item_type) for each of the three items; only
            the image tensors are used here.
        Returns the three general embeddings.
        """
        (
            raw_image1, image1, text_ids1, text_mask1, embedding_divider1, item_type1,
            raw_image2, image2, text_ids2, text_mask2, embedding_divider2, item_type2,
            raw_image3, image3, text_ids3, text_mask3, embedding_divider3, item_type3,
        ) = sample
        general_emb_1 = self.fw_embedding(image1)
        general_emb_2 = self.fw_embedding(image2)
        general_emb_3 = self.fw_embedding(image3)
        return general_emb_1, general_emb_2, general_emb_3

    def cal_compatibility_score(self, images, types):
        '''
        Sum of pairwise field-aware distances over all item pairs.

        images: [N, 3, h, w]
        types: [N]
        Returns a float. Returns 0.0 when fewer than two items are given
        (previously this crashed with AttributeError on the float 0.).
        '''
        if isinstance(types, torch.Tensor):
            types = types.detach().cpu().numpy().tolist()
        n = images.shape[0]
        # fw_embedding keeps the batch dim: each entry is [1, D].
        general_embeddings = []
        for i in range(n):
            general_embeddings.append(self.fw_embedding(images[i].unsqueeze(0)))

        if n < 2:
            return 0.0
        dist = 0.
        for i in range(n - 1):
            for j in range(i + 1, n):
                # BUGFIX: embeddings are already [1, D]; the previous extra
                # unsqueeze(0) fed [1, 1, D] tensors to cal_dist, which only
                # worked via broadcasting and broke NORM_PART normalization
                # (norm over a size-1 dim).
                dist += self.fw_embedding.cal_dist(
                    general_embeddings[i],
                    general_embeddings[j],
                    torch.tensor([types[i]]).to(const.device),
                    torch.tensor([types[j]]).to(const.device),
                    self.metric_branch,
                )
        return dist.mean().item()

    def cal_ans(self, images, types, ans_images, ans_types):
        '''
        Pick the best fill-in-the-blank answer for an outfit.

        images: [N, 3, h, w] outfit items
        types: [N]
        ans_images: [M, 3, h, w] candidate answers
        ans_types: [M]
        Returns (index of the candidate with the smallest mean distance to
        the outfit, the full [M, N] distance matrix).
        '''
        if isinstance(types, torch.Tensor):
            types = types.detach().cpu().numpy().tolist()
        if isinstance(ans_types, torch.Tensor):
            ans_types = ans_types.detach().cpu().numpy().tolist()
        # Each embedding below is [1, D] (fw_embedding keeps the batch dim).
        ans_embeddings = []
        for i in range(ans_images.shape[0]):
            ans_embeddings.append(self.fw_embedding(ans_images[i].unsqueeze(0)))
        embeddings = []
        for j in range(images.shape[0]):
            embeddings.append(self.fw_embedding(images[j].unsqueeze(0)))
        dist = []
        for i in range(ans_images.shape[0]):
            tmp = []
            for j in range(images.shape[0]):
                # BUGFIX: no extra unsqueeze(0) — see cal_compatibility_score.
                tmp.append(self.fw_embedding.cal_dist(
                    ans_embeddings[i],
                    embeddings[j],
                    torch.tensor([ans_types[i]]).to(const.device),
                    torch.tensor([types[j]]).to(const.device),
                    self.metric_branch,
                ).mean().item())
            dist.append(tmp)
        dist = np.array(dist)
        return np.argmin(dist.mean(axis=1)), dist

    def cal_loss(self, sample, output):
        """Compute the combined training loss for one triplet batch.

        sample: same 18-tuple as forward(); only the item types are used here.
        output: (general_emb_1, general_emb_2, general_emb_3) from forward().
        Returns {'structure': [[name, weight, value], ...], 'all': weighted sum}.
        """
        (
            raw_image1, image1, text_ids1, text_mask1, embedding_divider1, item_type1,
            raw_image2, image2, text_ids2, text_mask2, embedding_divider2, item_type2,
            raw_image3, image3, text_ids3, text_mask3, embedding_divider3, item_type3,
        ) = sample
        general_emb_1, general_emb_2, general_emb_3 = output

        # Triplet roles: x = anchor, y = far (negative), z = close (positive).
        general_emb_x, types_x = general_emb_1, item_type1
        general_emb_y, types_y = general_emb_3, item_type3
        general_emb_z, types_z = general_emb_2, item_type2

        # Triplet loss: anchor-negative distance should exceed anchor-positive.
        dist_a = self.fw_embedding.cal_dist(general_emb_x, general_emb_y, types_x, types_y, self.metric_branch)  # should be large
        dist_b = self.fw_embedding.cal_dist(general_emb_x, general_emb_z, types_x, types_z, self.metric_branch)  # should be small
        # target = 1 tells MarginRankingLoss the first input should rank higher.
        target = torch.ones_like(dist_a)
        loss_triplet = self.criterion(dist_a, dist_b, target)

        # L1 sparsity regularizer on the field projection masks.
        loss_l1_mask = self.fw_embedding.get_mask_L1_norm()

        # L2 regularizer on the general embeddings (per-element average).
        loss_l2_general_emb = (general_emb_x.norm(2) + general_emb_y.norm(2) + general_emb_z.norm(2)) / 3
        loss_l2_general_emb = loss_l2_general_emb / (general_emb_x.shape[0] * general_emb_x.shape[1])

        loss_structure = [
            ['loss_triplet', const.WEIGHT_TRIPLET, loss_triplet],
            ['loss_l1_mask', const.WEIGHT_L1_MASK, loss_l1_mask],
            ['loss_l2_general_emb', const.WEIGHT_L2_GENERAL_EMB, loss_l2_general_emb],
        ]

        all_loss = sum(weight * value for _, weight, value in loss_structure)

        return {
            'structure': loss_structure,
            'all': all_loss,
        }
