import torch
from src.base_networks import *
from torch import nn
from src import const
import os
import torchvision
from torch.nn import functional as F
from IPython import embed


def normalize(vector):
    """L2-normalize a tensor along its last dimension.

    A small epsilon (1e-10) is added to the norm so that all-zero
    vectors divide safely instead of producing NaNs.
    """
    last_dim = vector.dim() - 1
    norm = torch.norm(vector, p=2, dim=last_dim, keepdim=True)
    return vector / (norm + 1e-10)


class SparseSoftmaxLoss(object):
    '''
    Sparse-softmax next-item loss (Xintong's loss).

    Every LSTM prediction in the flattened batch is scored against every
    input embedding; the target for flattened position i is position
    i + 1 (the next item of the sequence).  Rows/columns that fall
    outside a sequence's valid length are masked out.

    Shapes:
        origin_seq (lstm_input): [batch_size, seq_len, emb_size]
        lstm_pred:               [batch_size, seq_len, emb_size]
        image_mask:              [batch_size, seq_len] (1 = valid slot)
    '''

    def __call__(self, origin_seq, lstm_pred, image_mask, is_training=True):
        """Return (normalized loss, per-outfit metric tensor or None).

        The metric tensor is only computed when is_training is False;
        it is the mean next-item log-probability of each outfit.
        """
        origin_seq = origin_seq.reshape(-1, origin_seq.shape[-1])
        lstm_pred = lstm_pred.reshape(-1, lstm_pred.shape[-1])
        # [B*T, B*T] similarity of every prediction vs. every input.
        scores = lstm_pred.matmul(origin_seq.transpose(0, 1))
        # Note: the diagonal is intentionally NOT removed -- input and
        # prediction are different tensors, so a prediction may
        # legitimately score against its own slot.
        # The last valid item of each sequence has no "next item", so it
        # (and every padding slot after it) is an invalid loss row.
        row_valid = image_mask.clone()
        pad_counts = (row_valid == 0).sum(dim=1).detach().cpu().numpy()
        for row_index, pad in enumerate(pad_counts):
            # Zero out the last *valid* position of each sequence.
            row_valid[row_index, row_valid.shape[1] - int(pad) - 1] = 0
        # Outer product of row validity and column validity.
        mask = row_valid.reshape(-1, 1).matmul(image_mask.reshape(1, -1))
        if getattr(const, "NO_MOVE", False):
            scores = scores * mask
        else:
            # Push masked-out positions to a very negative value so they
            # contribute ~0 probability after the softmax.
            move = torch.full((mask.shape[0], mask.shape[1]), -10000.).to(const.device)
            move = move * (1. - mask)
            scores = scores * mask + move
        scores = F.log_softmax(scores, dim=1)
        if is_training is False:
            # Evaluation metric: log-probability that item i is followed
            # by item i + 1, averaged over each outfit's valid rows.
            selected = []
            for i in range(scores.shape[0] - 1):
                # Multiplying by the mask zeroes the "impossible"
                # positions, which otherwise keep the -10000-ish values
                # introduced by `move`.
                selected.append(scores[i, i + 1].item() * mask[i, i + 1].item())
            selected.append(0.)
            selected = torch.tensor(selected).to(const.device)
            selected = selected.reshape(row_valid.shape[0], -1)
            selected = selected.sum(dim=1) / row_valid.sum(dim=1)
            every = selected
        else:
            every = None
        # Target of row i is flattened position i + 1; the final row
        # wraps to 0 (it is an invalid row anyway).
        target = torch.tensor(list(range(1, scores.shape[0])) + [0]).to(const.device)
        # `size_average=False` is deprecated; reduction='sum' is the
        # exact modern equivalent.  Normalize by the number of valid rows.
        return F.nll_loss(scores, target, reduction='sum') / row_valid.sum(), every


class XintongNetwork(ModuleWithAttr):
    """Outfit model combining an image/text contrastive loss with
    forward and backward next-item LSTM prediction losses.

    The backward pass runs an independent LSTM over the reversed
    sequence ('b_images'); both directions share the im2rnn image
    encoder.
    """

    def __init__(self, pretrained_embeddings=None):
        # pretrained_embeddings: optional word-embedding weights passed
        # through to AverageTextEmbedding.
        super(XintongNetwork, self).__init__()
        self.text_embedding = AverageTextEmbedding(pretrained_embeddings)
        self.im2text_embedding = ImageEmbedding(const.WORD_EMBED_SIZE)  # image embedding projected into the text space
        self.im2rnn_embedding = ImageEmbedding(const.IMAGE_EMBED_SIZE)  # image embedding fed to the LSTMs
        self.image_text_loss = InnerProductContrastiveLoss()
        # Forward LSTM: predicts the next item in display order.
        self.f_lstm = nn.LSTM(input_size=const.IMAGE_EMBED_SIZE,
                              hidden_size=const.LSTM_HIDDEN_SIZE,
                              dropout=const.LSTM_DROPOUT,
                              batch_first=True,
                              )
        self.f_lstm_pred = nn.Linear(const.LSTM_HIDDEN_SIZE, const.IMAGE_EMBED_SIZE)
        # Backward LSTM: same architecture, consumes the reversed sequence.
        self.b_lstm = nn.LSTM(input_size=const.IMAGE_EMBED_SIZE,
                              hidden_size=const.LSTM_HIDDEN_SIZE,
                              dropout=const.LSTM_DROPOUT,
                              batch_first=True,
                              )
        self.b_lstm_pred = nn.Linear(const.LSTM_HIDDEN_SIZE, const.IMAGE_EMBED_SIZE)

        self.sparse_softmax = SparseSoftmaxLoss()
        assert const.MAX_CATEGORY_NUM == 1

    def forward(self, sample):
        """Compute embeddings and both directions' LSTM predictions.

        sample must contain 'images', 'b_images' (the reversed image
        sequence) and the text fields consumed by AverageTextEmbedding.
        Returns a dict of intermediate tensors consumed by cal_loss.
        """
        batch_size = sample['images'].shape[0]  # currently unused
        text_embedding = self.text_embedding(sample)
        image2text_embedding = self.im2text_embedding(sample['images'])
        im2rnn_embedding = self.im2rnn_embedding(sample['images'])
        f_lstm_hidden = self.f_lstm(im2rnn_embedding)[0]
        f_lstm_pred = self.f_lstm_pred(f_lstm_hidden)
        # Backward direction reuses the same image encoder on the
        # pre-reversed 'b_images' input.
        b_im2rnn_embedding = self.im2rnn_embedding(sample['b_images'])
        b_lstm_hidden = self.b_lstm(b_im2rnn_embedding)[0]
        b_lstm_pred = self.b_lstm_pred(b_lstm_hidden)

        output = {
            'text_embedding': text_embedding,
            'image2text_embedding': image2text_embedding,
            'im2rnn_embedding': im2rnn_embedding,
            'b_im2rnn_embedding': b_im2rnn_embedding,
            'f_lstm_pred': f_lstm_pred,
            'b_lstm_pred': b_lstm_pred,
        }
        return output

    def cal_loss(self, sample, output):
        """Combine the image/text loss with both sparse-softmax losses.

        Returns a dict with the weighted loss structure, the total loss,
        and (only in eval mode) the weighted per-outfit compatibility
        metrics from both directions.
        """
        text_mask = sample['word_mask']
        image_mask = sample['image_mask']
        b_image_mask = sample['b_image_mask']
        text_embedding = output['text_embedding']
        image2text_embedding = output['image2text_embedding']
        im2rnn_embedding = output['im2rnn_embedding']
        b_im2rnn_embedding = output['b_im2rnn_embedding']
        f_lstm_pred = output['f_lstm_pred']
        b_lstm_pred = output['b_lstm_pred']

        norm_text_embedding = normalize(text_embedding)
        norm_image2text_embedding = normalize(image2text_embedding)
        image_text_loss = self.image_text_loss(norm_text_embedding, norm_image2text_embedding, text_mask, image_mask)
        f_sparse_softmax_loss, f_outfit_comp_metrics = self.sparse_softmax(im2rnn_embedding, f_lstm_pred, image_mask, self.training)
        b_sparse_softmax_loss, b_outfit_comp_metrics = self.sparse_softmax(b_im2rnn_embedding, b_lstm_pred, b_image_mask, self.training)

        # [name, weight, value] triples; total = sum(weight * value).
        loss_structure = [
            ['image_text_loss', const.WEIGHT_IMAGE_TEXT, image_text_loss],
            ['f_sparse_softmax_loss', const.F_WEIGHT_SPARSE_SOFTMAX, f_sparse_softmax_loss],
            ['b_sparse_softmax_loss', const.B_WEIGHT_SPARSE_SOFTMAX, b_sparse_softmax_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])
        if self.training:
            # The per-outfit metrics are only produced in eval mode.
            return {
                'structure': loss_structure,
                'all': all_loss,
                'outfit_comp_metrics': None,
            }
        else:
            return {
                'structure': loss_structure,
                'all': all_loss,
                'outfit_comp_metrics': const.F_WEIGHT_SPARSE_SOFTMAX * f_outfit_comp_metrics + const.B_WEIGHT_SPARSE_SOFTMAX * b_outfit_comp_metrics,
            }
    
    def fitb_ans(self, sample):
        """Fill-in-the-blank: pick the answer image whose embedding best
        matches the LSTM prediction at the blank position.

        Returns the index of the chosen answer image.
        """
        def flip(x, dim):
            # Reverse tensor x along `dim` via an index tensor.
            dim = x.dim() + dim if dim < 0 else dim
            inds = tuple(slice(None, None) if i != dim
                         else x.new(torch.arange(x.size(i) - 1, -1, -1).tolist()).long()
                         for i in range(x.dim()))
            return x[inds]
        question_nums = sample['images'].shape[0]
        # [num, emb_size]
        question_embeddings = self.im2rnn_embedding(sample['images'].unsqueeze(0))
        answer_embeddings = self.im2rnn_embedding(sample['answer_images'].unsqueeze(0))

        # blank_position is strictly 1-indexed.
        blank_position = sample['blank_position']
        if blank_position == question_nums + 1:
            # Blank after the last item: only the forward LSTM applies.
            f_lstm_hidden = self.f_lstm(question_embeddings)[0]
            f_lstm_pred = self.f_lstm_pred(f_lstm_hidden[:, question_nums - 1, :])
            f_scores = answer_embeddings.squeeze(0).matmul(f_lstm_pred.transpose(0, 1))
            f_scores = F.log_softmax(f_scores, dim=0)
            ans = f_scores.argmax().item()
        elif blank_position == 1:
            # Blank before the first item: only the backward LSTM applies.
            b_question_embeddings = flip(question_embeddings, 1)
            b_lstm_hidden = self.b_lstm(b_question_embeddings)[0]
            b_lstm_pred = self.b_lstm_pred(b_lstm_hidden[:, question_nums - 1, :])
            b_scores = answer_embeddings.squeeze(0).matmul(b_lstm_pred.transpose(0, 1))
            b_scores = F.log_softmax(b_scores, dim=0)
            ans = b_scores.argmax().item()
        else:
            # Interior blank: combine forward and backward scores with
            # the same weights used by the training losses.
            f_lstm_hidden = self.f_lstm(question_embeddings)[0]
            f_lstm_pred = self.f_lstm_pred(f_lstm_hidden[:, blank_position - 2, :])
            f_scores = answer_embeddings.squeeze(0).matmul(f_lstm_pred.transpose(0, 1))
            f_scores = F.log_softmax(f_scores, dim=0)

            b_question_embeddings = flip(question_embeddings, 1)
            b_blank_position = question_nums - blank_position + 1 # position in the reversed sequence

            b_lstm_hidden = self.b_lstm(b_question_embeddings)[0]
            b_lstm_pred = self.b_lstm_pred(b_lstm_hidden[:, b_blank_position - 2, :])
            b_scores = answer_embeddings.squeeze(0).matmul(b_lstm_pred.transpose(0, 1))
            b_scores = F.log_softmax(b_scores, dim=0)

            scores = f_scores * const.F_WEIGHT_SPARSE_SOFTMAX + b_scores * const.B_WEIGHT_SPARSE_SOFTMAX
            ans = scores.argmax().item()
        return ans
    
    def fitb_ans_old(self, sample):
        '''
        Older fill-in-the-blank path: builds one full feed sample per
        candidate answer (with zeroed text fields) and ranks candidates
        by the outfit_comp_metrics from cal_loss.

        sample['images']: [N, 3, h, w]
        sample['answer_images']: [N, 3, h, w]
        '''
        def flip(x, dim):
            # Reverse tensor x along `dim` via an index tensor.
            dim = x.dim() + dim if dim < 0 else dim
            inds = tuple(slice(None, None) if i != dim
                         else x.new(torch.arange(x.size(i) - 1, -1, -1).tolist()).long()
                         for i in range(x.dim()))
            return x[inds]
        ans_num, _, h, w = sample['answer_images'].shape
        pos = sample['blank_position']
        # Insert a zero placeholder image at the blank position.
        feed_images = torch.cat([sample['images'][0:pos - 1, ...], torch.zeros(1, 3, h, w).to(const.device), sample['images'][pos - 1:, ...]])
        # Replicate the question once per candidate answer.
        feed_images = torch.cat([feed_images.clone().unsqueeze(0) for _ in range(ans_num)])
        # NOTE(review): the right-hand side is 5-D ([1, N, 3, h, w])
        # while the slot is 4-D -- this broadcast looks like it would
        # fail; confirm whether the unsqueeze(0) is needed at all.
        feed_images[:, pos - 1, ...] = sample['answer_images'].unsqueeze(0)
        # Apply the same treatment to the type ids.
        feed_image_types = torch.cat([sample['image_types'][0:pos - 1], torch.tensor([0]).to(const.device), sample['image_types'][pos - 1:]])
        feed_image_types = torch.cat([feed_image_types.clone().unsqueeze(0) for _ in range(ans_num)])
        # NOTE(review): same 2-D-into-1-D broadcast concern as above.
        feed_image_types[:, pos - 1] = sample['answer_types'].unsqueeze(0)
        image_mask = torch.ones(feed_images.shape[0], feed_images.shape[1]).to(const.device)
        b_feed_images = flip(feed_images, dim=1)
        b_feed_image_types = flip(feed_image_types, dim=1)

        x = image_mask.shape[0]
        y = image_mask.shape[1]

        # Text fields are zeroed out: only the image losses matter here.
        feed_sample = {
            'images': feed_images,
            'image_mask': image_mask.float(),
            'types': feed_image_types,
            'b_images': b_feed_images,
            'b_image_mask': image_mask.float(),
            'b_types': b_feed_image_types,
            'word_ids': torch.zeros((x, y, const.OUTFIT_NAME_PAD_NUM), dtype=torch.long).to(const.device),
            'word_mask': torch.zeros((x, y)).float().to(const.device),
            'word_detail_mask': torch.zeros((x, y, const.OUTFIT_NAME_PAD_NUM)).float().to(const.device),
            'word_embedding_divider': torch.ones((x, y)).float().to(const.device),
        }
        output = self(feed_sample)
        loss = self.cal_loss(feed_sample, output)
        # Highest compatibility metric wins.
        ans = loss['outfit_comp_metrics'].argmax().item()
        return ans


class ExpNetwork(ModuleWithAttr):
    """Forward-only LSTM outfit model with learned per-category masks.

    Each item embedding is elementwise-gated by a category ("type")
    mask embedding before being compared in the sparse-softmax loss.
    """

    def __init__(self, pretrained_embeddings=None):
        # pretrained_embeddings: optional word-embedding weights passed
        # through to AverageTextEmbedding.
        super(ExpNetwork, self).__init__()
        self.text_embedding = AverageTextEmbedding(pretrained_embeddings)
        self.im2text_embedding = ImageEmbedding(const.WORD_EMBED_SIZE)  # image embedding projected into the text space
        self.im2rnn_embedding = ImageEmbedding(const.IMAGE_EMBED_SIZE)  # image embedding fed to the LSTM
        self.image_text_loss = InnerProductContrastiveLoss()
        # One learnable gating vector per category.
        self.mask_embedding = nn.Embedding(const.MAX_CATEGORY_NUM, const.IMAGE_EMBED_SIZE)
        self.sparse_softmax = SparseSoftmaxLoss()
        # NOTE(review): unlike XintongNetwork, this LSTM is not
        # batch_first, yet the input below appears batch-first -- confirm.
        self.f_lstm = nn.LSTM(input_size=const.IMAGE_EMBED_SIZE,
                              hidden_size=const.LSTM_HIDDEN_SIZE,
                              dropout=const.LSTM_DROPOUT)
        self.f_lstm_fc = nn.Linear(const.LSTM_HIDDEN_SIZE, const.IMAGE_EMBED_SIZE)

    def forward(self, sample):
        """Compute text/image embeddings, typed (mask-gated) embeddings,
        and LSTM next-item predictions.

        Returns a dict of intermediate tensors consumed by cal_loss.
        """
        batch_size = sample['images'].shape[0]
        text_embedding = self.text_embedding(sample)
        image2text_embedding = self.im2text_embedding(sample['images'])
        im2rnn_embedding = self.im2rnn_embedding(sample['images'])
        # Gate each item embedding with its category's mask vector.
        masks = self.mask_embedding(sample['types'].reshape(-1))
        im2rnn_embedding_typed = masks * im2rnn_embedding.reshape(-1, const.IMAGE_EMBED_SIZE)
        im2rnn_embedding_typed = im2rnn_embedding_typed.reshape(batch_size, -1, const.IMAGE_EMBED_SIZE)
        # NOTE(review): the LSTM consumes the UNTYPED embedding while
        # cal_loss compares the typed tensors -- confirm this is intended.
        f_lstm_hidden = self.f_lstm(im2rnn_embedding)[0]
        f_lstm_pred = self.f_lstm_fc(f_lstm_hidden)
        # Shift types left by one so each prediction is gated by the
        # category of the item it is predicting (pad 0 at the end).
        lstm_types = F.pad(sample['types'], [0, 1, 0, 0])[:, 1:].reshape(-1)
        lstm_masks = self.mask_embedding(lstm_types.reshape(-1))
        f_lstm_pred_typed = lstm_masks * f_lstm_pred.reshape(-1, const.IMAGE_EMBED_SIZE)
        f_lstm_pred_typed = f_lstm_pred_typed.reshape(batch_size, -1, const.IMAGE_EMBED_SIZE)

        output = {
            'text_embedding': text_embedding,
            'image2text_embedding': image2text_embedding,
            'im2rnn_embedding': im2rnn_embedding,
            'im2rnn_embedding_typed': im2rnn_embedding_typed,
            'f_lstm_pred': f_lstm_pred,
            'f_lstm_pred_typed': f_lstm_pred_typed,
        }
        return output

    def cal_loss(self, sample, output):
        """Combine image/text loss, typed sparse-softmax loss, and an L1
        sparsity penalty on the category mask embeddings.
        """
        text_mask = sample['word_mask']
        image_mask = sample['image_mask']
        text_embedding = output['text_embedding']
        image2text_embedding = output['image2text_embedding']
        im2rnn_embedding = output['im2rnn_embedding']
        im2rnn_embedding_typed = output['im2rnn_embedding_typed']
        f_lstm_pred_typed = output['f_lstm_pred_typed']

        norm_text_embedding = normalize(text_embedding)
        norm_image2text_embedding = normalize(image2text_embedding)
        image_text_loss = self.image_text_loss(norm_text_embedding, norm_image2text_embedding, text_mask, image_mask)
        sparse_softmax_loss, outfit_comp_metrics = self.sparse_softmax(im2rnn_embedding_typed, f_lstm_pred_typed, image_mask, self.training)
        # Mean absolute value of the mask weights -> encourages sparse gates.
        mask_l1_loss = self.mask_embedding.weight.norm(1) / self.mask_embedding.weight.numel()

        # [name, weight, value] triples; total = sum(weight * value).
        loss_structure = [
            ['image_text_loss', const.WEIGHT_IMAGE_TEXT, image_text_loss],
            ['sparse_softmax_loss', const.WEIGHT_SPARSE_SOFTMAX, sparse_softmax_loss],
            ['mask_l1_loss', const.WEIGHT_L1_LOSS, mask_l1_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])

        return {
            'structure': loss_structure,
            'all': all_loss,
            'outfit_comp_metrics': outfit_comp_metrics,
        }


class OLDExpNetwork(ModuleWithAttr):
    '''
    Early attempt that combines image and text embeddings directly.
    Results were poor (missing normalization?); kept for reference and a
    candidate for removal.
    '''

    def __init__(self, pretrained_embeddings=None):
        # BUG FIX: the original called super(ExpNetwork, self).__init__(),
        # a leftover from renaming the class.  Since OLDExpNetwork does
        # not inherit from ExpNetwork, that call raises
        # "TypeError: super(type, obj): obj must be an instance or
        # subtype of type" on construction.
        super(OLDExpNetwork, self).__init__()
        self.text_embedding = AverageTextEmbedding(pretrained_embeddings)
        self.image_embedding = ImageEmbedding()
        self.image_text_loss = InnerProductContrastiveLoss()
        self.outfit_loss = OutfitSimpleCenterLoss()
        self.inter_center_product_loss = InterCenterProductLoss()

    def forward(self, sample):
        """Embed text and images; pass both masks through unchanged."""
        text_embedding = self.text_embedding(sample)
        image_embedding = self.image_embedding(sample)
        text_mask = sample['word_mask']
        image_mask = sample['image_mask']
        return {
            'text_embedding': text_embedding,
            'image_embedding': image_embedding,
            'text_mask': text_mask,
            'image_mask': image_mask,
        }

    def cal_loss(self, sample, output):
        """Combine norm penalties, image/text matching, outfit-center and
        inter-center losses into a weighted total.
        """
        text_embedding = output['text_embedding']
        image_embedding = output['image_embedding']
        text_mask = output['text_mask']
        image_mask = output['image_mask']

        # L2-normalize here; other normalization schemes could be tried.
        text_embedding = text_embedding / (torch.norm(text_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)

        line_text_embedding = text_embedding.reshape(-1, text_embedding.shape[2])
        line_image_embedding = image_embedding.reshape(-1, image_embedding.shape[2])
        line_text_mask = sample['word_mask'].reshape(-1)  # validity of embedding 1
        line_image_mask = sample['image_mask'].reshape(-1)  # validity of embedding 2

        # L2 penalty on the (masked) embeddings themselves.
        norm_text_loss = ((text_embedding * text_mask.unsqueeze(2).float()) ** 2).sum() / text_mask.sum().float()
        norm_image_loss = ((image_embedding * image_mask.unsqueeze(2).float()) ** 2).sum() / image_mask.sum().float()

        # Keep image and text embeddings of the same item consistent.
        image_text_loss = self.image_text_loss(line_text_embedding, line_image_embedding, line_text_mask, line_image_mask)
        outfit_center_loss_dict = self.outfit_loss(image_embedding, image_mask)
        outfit_center_loss = outfit_center_loss_dict['all']
        # Negated so that compatible outfits score HIGHER on the metric.
        outfit_comp_metrics = -outfit_center_loss_dict['every']

        # Loss between the per-outfit center vectors.
        centers = (image_mask.unsqueeze(2).float() * image_embedding).sum(dim=1) / image_mask.sum(dim=1, keepdim=True).float()
        centers = centers.unsqueeze(dim=1)
        inter_center_product_loss = self.inter_center_product_loss(centers)

        # [name, weight, value] triples; total = sum(weight * value).
        loss_structure = [
            ['norm_text_loss', const.WEIGHT_NORM_TEXT, norm_text_loss],
            ['norm_image_loss', const.WEIGHT_NORM_IMAGE, norm_image_loss],
            ['image_text_loss', const.WEIGHT_IMAGE_TEXT, image_text_loss],
            ['outfit_center_loss', const.WEIGHT_OUTFIT_CENTER, outfit_center_loss],
            ['inter_center_product_loss', const.WEIGHT_INTER_PRODUCT, inter_center_product_loss],
        ]

        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])

        return {
            'structure': loss_structure,
            'all': all_loss,
            'outfit_comp_metrics': outfit_comp_metrics,
        }

    def fitb_ans(self, sample):
        """Fill-in-the-blank: pick the answer embedding closest (squared
        L2) to the mean of the question embeddings.
        """
        image_embedding = self.image_embedding({'images': sample['images']})
        answer_embedding = self.image_embedding({'images': sample['answer_images']})
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        answer_embedding = answer_embedding.reshape(-1, image_embedding.shape[-1])
        # This network's outfit "center" is simply the embedding mean.
        center = image_embedding.mean(dim=0, keepdim=True)
        diff = answer_embedding - center
        diff = (diff ** 2).sum(dim=1)
        ans_index = diff.argmin().item()
        return ans_index

    def get_embedding(self, sample):
        """Return L2-normalized image embeddings flattened to
        [num_images, emb_size].

        sample['images'] is a single outfit's image tensor (3 x 224 x 224
        images); a batch dimension is added before embedding.
        """
        sample['images'] = sample['images'].unsqueeze(dim=0)
        image_embedding = self.image_embedding(sample)
        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)
        if hasattr(const, 'SCALE_FACTOR'):
            # Optional global scaling configured via const.
            image_embedding = const.SCALE_FACTOR * image_embedding
        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])
        return image_embedding
