import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torchvision
from src import const
from torch.nn import functional as F

class ModuleWithAttr(nn.Module):
    """nn.Module that keeps named scalar counters (e.g. a global 'step') as
    registered buffers, so they are saved and restored with the state dict.

    Values can only be numbers; every key is registered with a default of 0.
    """

    def __init__(self, extra_info=('step',)):
        # NOTE: tuple default avoids the shared-mutable-default pitfall of
        # the previous `extra_info=['step']`.
        super(ModuleWithAttr, self).__init__()
        for key in extra_info:
            self.set_buffer(key, 0)

    def set_buffer(self, key, value):
        """Register (first use) or overwrite the scalar buffer for `key`."""
        # Buffers live under a '__'-prefixed name to avoid clashing with
        # ordinary attributes.
        name = '__' + key
        wrapped = torch.tensor(value)
        if not hasattr(self, name):
            self.register_buffer(name, wrapped)
        else:
            # nn.Module.__setattr__ routes this assignment into _buffers.
            setattr(self, name, wrapped)

    def get_buffer(self, key):
        """Return the Python scalar stored for `key`; raises if unknown."""
        # NOTE(review): shadows nn.Module.get_buffer (torch >= 1.9), which
        # has different semantics — confirm no caller expects the parent's.
        name = '__' + key
        if not hasattr(self, name):
            raise Exception('no such key!')
        return getattr(self, name).item()


class AverageTextEmbedding(ModuleWithAttr):
    """Embeds padded word-id sequences and mean-pools them per item.

    Optionally initialized from a pretrained numpy embedding matrix of
    shape (MAX_VOCAB_SIZE + 1, WORD_EMBED_SIZE).
    """

    def __init__(self, pretrained_embeddings=None):
        super(AverageTextEmbedding, self).__init__()
        self.embedding = nn.Embedding(const.MAX_VOCAB_SIZE + 1, const.WORD_EMBED_SIZE)
        if pretrained_embeddings is not None:
            self.embedding.weight.data.copy_(torch.from_numpy(pretrained_embeddings))

    def forward(self, sample):
        """Average the word embeddings of each item name.

        Expects in `sample`:
          word_ids:               [batch, ITEM_PAD_NUM, NAME_PAD_NUM]
          word_detail_mask:       [batch, ITEM_PAD_NUM, NAME_PAD_NUM]
          word_embedding_divider: [batch, ITEM_PAD_NUM]
        Returns [batch, ITEM_PAD_NUM, WORD_EMBED_SIZE].
        """
        batch_size = sample['word_ids'].shape[0]
        # Collapse batch and item dims so every name is one row of ids.
        flat_ids = sample['word_ids'].reshape(-1, const.OUTFIT_NAME_PAD_NUM)
        flat_mask = sample['word_detail_mask'].reshape(-1, const.OUTFIT_NAME_PAD_NUM)
        # Zero out the embeddings of padded word slots.
        embedded = self.embedding(flat_ids) * flat_mask.unsqueeze(dim=2).float()
        embedded = embedded.reshape(batch_size,
                                    const.OUTFIT_ITEM_PAD_NUM,
                                    const.OUTFIT_NAME_PAD_NUM,
                                    const.WORD_EMBED_SIZE)
        # Mean over the (valid) words of each name; the divider is the
        # caller-supplied count of real words per item.
        divider = sample['word_embedding_divider'].unsqueeze(dim=2).float()
        return embedded.sum(dim=2) / divider


class ResNet18Extractor(ModuleWithAttr):
    """Pretrained ResNet-18 backbone that returns the pooled 512-d feature
    (output of `avgpool`, i.e. before the classification `fc` layer)."""

    def __init__(self):
        super(ResNet18Extractor, self).__init__()
        self.res18 = torchvision.models.resnet18(pretrained=True)

    def forward(self, x):
        """Map [N, 3, 224, 224] images to [N, 512] features."""
        # Run the children in registration order and stop right after the
        # global average pool so the fc classifier is never applied.
        for name, layer in self.res18._modules.items():
            x = layer(x)
            if name == 'avgpool':
                # BUG FIX: torch.squeeze(x) also collapsed the batch axis
                # when N == 1, returning [512] instead of [1, 512];
                # flatten keeps the batch dimension intact.
                return torch.flatten(x, 1)
        # BUG FIX: previously returned an empty dict here, silently handing
        # callers an unusable value; fail loudly instead.
        raise RuntimeError("backbone has no 'avgpool' layer")


class ImageEmbedding(ModuleWithAttr):
    '''
    Input:  [batch_size, N, 3, 224, 224] images
    Output: [batch_size, N, embed_size] embeddings
    (embed_size defaults to const.IMAGE_EMBED_SIZE)
    '''

    def __init__(self, embed_size=None):
        super(ImageEmbedding, self).__init__()
        self.res18extractor = ResNet18Extractor()
        # Remember the actual output width so forward() does not have to
        # guess it from const.
        self.embed_size = const.IMAGE_EMBED_SIZE if embed_size is None else embed_size
        self.image_embedding = nn.Linear(512, self.embed_size)

    def forward(self, sample):
        batch_size = sample['images'].shape[0]
        # Fold the per-outfit item dimension into the batch for the CNN.
        x = sample['images'].reshape(-1, 3, 224, 224)
        x = self.res18extractor(x)
        x = self.image_embedding(x)
        # BUG FIX: the reshape previously hard-coded const.IMAGE_EMBED_SIZE,
        # which broke (or mis-shaped) the output whenever a custom
        # embed_size was passed to __init__; use the real width instead.
        # [batch_size, OUTFIT_ITEM_PAD_NUM, embed_size]
        x = x.reshape(batch_size, -1, self.embed_size)
        return x


class InnerProductContrastiveLoss(object):
    """Bidirectional max-margin contrastive loss over inner products.

    Row i of `embedding1` and row i of `embedding2` are a positive pair;
    every other combination is a negative. Each positive score must beat
    the negatives in its row and in its column by const.EMBED_MARGIN.
    Masks mark which rows are valid (padding rows are excluded).
    """

    def __init__(self):
        # Pre-build the largest identity matrix we can ever need so
        # __call__ only slices a view instead of re-allocating per batch.
        size = max(const.BATCH_SIZE, const.VAL_BATCH_SIZE) * const.OUTFIT_ITEM_PAD_NUM
        self.eye = torch.eye(size, size).float().to(const.device)

    def __call__(self, embedding1, embedding2, mask1, mask2):
        # All pairwise inner products; the diagonal holds the positives.
        sim = embedding1.matmul(embedding2.transpose(0, 1))
        pos = sim.diag().unsqueeze(1)
        # Outer product of the masks: pair (i, j) valid iff both rows are.
        pair_valid = mask1.unsqueeze(1).matmul(mask2.unsqueeze(0))
        pos_valid = pair_valid.diag().unsqueeze(1)
        n = sim.shape[0]
        # Diagonal entries are never negatives, so mask them out.
        off_diag = 1. - self.eye[0:n, 0:n]
        # Hinge (equivalent to max(0, x)) in both directions.
        hinge_row = F.relu(const.EMBED_MARGIN - pos + sim)
        hinge_col = F.relu(const.EMBED_MARGIN - pos.transpose(0, 1) + sim)
        weight_row = (pair_valid * pos_valid).float() * off_diag
        weight_col = (pair_valid * pos_valid.transpose(0, 1)).float() * off_diag
        loss_row = (hinge_row * weight_row).sum() / weight_row.sum()
        loss_col = (hinge_col * weight_col).sum() / weight_col.sum()
        return loss_row + loss_col


class OutfitSimpleCenterLoss(object):
    """Penalizes the spread of item embeddings around their outfit mean.

    `image_embedding` is [batch, items, dim]; `image_mask` is [batch, items]
    with 1 for real items and 0 for padding (padded slots contribute nothing).
    Returns a dict: "all" is the scalar loss as a numpy value, "every" is the
    per-outfit loss tensor used later during evaluation.
    """

    def __call__(self, image_embedding, image_mask):
        # Masked mean over the item axis gives each outfit's center.
        # (Plain average over valid items for now; a learned weight could
        # be considered later.)
        mask = image_mask.unsqueeze(2).float()
        item_counts = image_mask.sum(dim=1, keepdim=True).float()
        centers = (mask * image_embedding).sum(dim=1) / item_counts
        # Deviation of every valid item from its outfit center.
        deviation = mask * (image_embedding - centers.unsqueeze(dim=1))
        sq = deviation ** 2
        total_loss = sq.sum() / image_mask.sum().float()
        per_outfit = sq.sum(dim=2).sum(dim=1) / image_mask.sum(dim=1).float()
        return {
            "all": total_loss.detach().cpu().numpy(),
            "every": per_outfit,
        }


class InterCenterProductLoss(object):
    '''
    Takes an (..., embedding_size) tensor, flattens the leading dims to N
    rows, and averages the pairwise inner products over all i != j pairs.
    The goal is to push centers apart, so smaller values are better.
    '''

    def __call__(self, embedding):
        flat = embedding.reshape(-1, embedding.shape[-1])
        n = flat.shape[0]
        # BUG FIX: with a single row there are no (i, j) pairs, so the
        # old `n * n - n` denominator was zero and the loss became
        # inf/nan; return a zero that stays connected to the graph.
        if n < 2:
            return (flat * 0.).sum()
        gram = flat.matmul(flat.transpose(0, 1))
        # Drop the diagonal (self-products) before averaging.
        off_diag = gram - torch.diag(gram.diag())
        return off_diag.sum() / (n * n - n)
