# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F

from .backbone import get_backbone


class projection_MLP(nn.Module):
    """Projection head mapping backbone features to the contrastive space.

    A 2- or 3-layer MLP of Linear + BatchNorm (+ ReLU) blocks whose final
    BatchNorm has no affine parameters (per the reference paper,
    page 5, paragraph 2).

    Args:
        in_dim (int): Dimensionality of the input features.
        out_dim (int): Dimensionality of the output embedding; also used as
            the hidden width.
        num_layers (int): Depth of the MLP, either 2 or 3. (default: 2)

    Raises:
        ValueError: If ``num_layers`` is not 2 or 3.
    """

    def __init__(self, in_dim, out_dim, num_layers=2):
        super().__init__()
        # Fix: an unsupported num_layers used to fall through forward() and
        # silently return the input unchanged; fail fast at construction.
        if num_layers not in (2, 3):
            raise ValueError(f"num_layers must be 2 or 3, got {num_layers}")
        hidden_dim = out_dim
        self.num_layers = num_layers
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(hidden_dim, out_dim),
            nn.BatchNorm1d(out_dim, affine=False)  # no affine on final BN (page 5, paragraph 2)
        )

    def forward(self, x):
        """Project ``x`` of shape (bsz, in_dim) to shape (bsz, out_dim)."""
        x = self.layer1(x)
        if self.num_layers == 3:  # the middle layer only exists in the 3-layer variant
            x = self.layer2(x)
        return self.layer3(x)


class MoCoV2_Model(nn.Module):
    '''
    MoCo v2 model, adapted from https://github.com/facebookresearch/moco,
    with additional inspiration from https://github.com/HobbitLong/CMC.

    Args:
        backbone (str): Backbone identifier passed to ``get_backbone``.
        out_dim (int): Dimensionality of the projected features. (default: 128)
        queue_size (int): Number of negative keys kept in the memory queue. (default: 65536)
        momentum (float): Momentum for the key-encoder EMA update. (default: 0.999)
        temperature (float): Temperature used in the InfoNCE / NT-Xent loss. (default: 0.07)

    forward(x_q, x_k) returns:
        logit (Tensor): Positive and negative logits for InfoNCE, (bsz, queue_size + 1).
        label (Tensor): Target indices for cross entropy — always 0, since the
            positive logit is placed in column 0. Shape (bsz,).
    '''

    def __init__(self, backbone, out_dim=128, queue_size=65536, momentum=0.999, temperature=0.07):
        super(MoCoV2_Model, self).__init__()

        self.queue_size = queue_size
        self.momentum = momentum
        self.temperature = temperature

        # Query encoder is trained by backprop; the key encoder is never
        # updated by gradients — only by the momentum (EMA) rule below.
        self.encoder_q = get_backbone(backbone)
        self.encoder_k = get_backbone(backbone)

        dim_mlp = self.encoder_q.fc.weight.shape[1]  # input width of the backbone's fc head

        # Replace the classifier heads with projection MLPs (the MoCo v2 change).
        self.encoder_q.fc = projection_MLP(in_dim=dim_mlp, out_dim=out_dim, num_layers=2)
        self.encoder_k.fc = projection_MLP(in_dim=dim_mlp, out_dim=out_dim, num_layers=2)

        # Initialise the key encoder as an exact copy of the query encoder
        # and freeze it with respect to gradient updates.
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)
            param_k.requires_grad = False

        # Memory queue of negative keys, one row per stored sample.
        self.register_buffer("queue", torch.randn(self.queue_size, out_dim))
        # Pointer to the current enqueue position within the queue.
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @torch.no_grad()
    def momentum_update(self):
        """EMA-update the key encoder's parameters from the query encoder."""
        for p_q, p_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            p_k.data = p_k.data * self.momentum + p_q.detach().data * (1. - self.momentum)

    @torch.no_grad()
    def shuffled_idx(self, batch_size, device=None):
        """Return (shuffled, reverse) index tensors for minibatch shuffling.

        ``reverse`` is the inverse permutation of ``shuffled``, so
        ``x[shuffled][reverse] == x``.

        Fix: indices are created on ``device`` (default: CPU) instead of
        unconditionally calling ``.cuda()``, so the model also runs on CPU.
        """
        shuffled_idxs = torch.randperm(batch_size, device=device)
        reverse_idxs = torch.zeros(batch_size, dtype=torch.long, device=device)
        value = torch.arange(batch_size, device=device)
        reverse_idxs.index_copy_(0, shuffled_idxs, value)
        return shuffled_idxs, reverse_idxs

    @torch.no_grad()
    def update_queue(self, feat_k):
        """Enqueue the latest key features, overwriting the oldest entries.

        Fix: positions wrap around the end of the queue, so batch sizes that
        do not evenly divide ``queue_size`` no longer raise an IndexError.
        """
        batch_size = feat_k.size(0)
        ptr = int(self.queue_ptr)
        idxs = torch.arange(ptr, ptr + batch_size, device=self.queue.device) % self.queue_size
        self.queue[idxs] = feat_k
        self.queue_ptr[0] = (ptr + batch_size) % self.queue_size

    def InfoNCE_logits(self, f_q, f_k):
        '''
        Compute InfoNCE logits: the similarity of each query to its positive
        key and to every negative key stored in the memory queue.

        Args:
            f_q (Tensor): Query-encoder features, shape (bsz, dim).
            f_k (Tensor): Key-encoder features, shape (bsz, dim).
        Returns:
            logits (Tensor): (bsz, queue_size + 1); column 0 is the positive.
            labels (Tensor): (bsz,) of zeros — the positive is always index 0.
        '''
        f_k = f_k.detach()
        f_mem = self.queue.clone().detach()  # snapshot of the memory queue buffer
        # L2-normalize all feature representations so dot products are cosine sims.
        f_q = F.normalize(f_q, dim=1)
        f_k = F.normalize(f_k, dim=1)
        f_mem = F.normalize(f_mem, dim=1)

        # Positive logits: per-sample dot product, shape (bsz, 1).
        pos = torch.bmm(f_q.view(f_q.size(0), 1, -1), f_k.view(f_k.size(0), -1, 1)).squeeze(-1)
        # Negative logits against every queue entry, shape (bsz, queue_size).
        neg = torch.mm(f_q, f_mem.transpose(1, 0))
        logits = torch.cat((pos, neg), dim=1)
        logits /= self.temperature
        # Fix: labels are created on the logits' device instead of
        # unconditionally calling .cuda().
        labels = torch.zeros(logits.shape[0], dtype=torch.long, device=logits.device)

        return logits, labels

    def forward(self, x_q, x_k):
        """MoCo v2 forward pass: returns (logits, labels) for InfoNCE loss."""
        batch_size = x_q.size(0)
        # Feature representations of the query view from the query encoder.
        feat_q = self.encoder_q(x_q)
        # TODO: shuffle ids across processes with distributed data parallel
        shuffled_idxs, reverse_idxs = self.shuffled_idx(batch_size, device=x_q.device)

        with torch.no_grad():
            self.momentum_update()  # update the key encoder before encoding keys
            x_k = x_k[shuffled_idxs]  # shuffle the key minibatch
            # Feature representations of the shuffled key view from the key encoder.
            feat_k = self.encoder_k(x_k)
            # Reverse the shuffle so keys line up with their queries again.
            feat_k = feat_k[reverse_idxs]
        # Compute the logits for the InfoNCE contrastive loss.
        logit, label = self.InfoNCE_logits(feat_q, feat_k)
        # Update the queue/memory with the current key-encoder minibatch.
        self.update_queue(feat_k)
        return logit, label


if __name__ == '__main__':
    # Smoke test: run a tiny forward pass and optionally reload a checkpoint.
    x_q = torch.randn(2, 3, 32, 32)
    x_k = torch.randn(2, 3, 32, 32)

    class args:
        dataset = "cifar10"
        model = "resnet18"
        batch_size = 128

    # Fix: the constructor expects the backbone identifier string, not the
    # whole args namespace (the old code passed the class itself).
    model = MoCoV2_Model(args.model)
    logit, label = model(x_q, x_k)

    print(logit.shape, label.shape)

    # torch.save({
    #     "model": model
    # }, 'test.pth')

    # Fix: guard the checkpoint round-trip so the smoke test does not crash
    # when 'test.pth' was never saved, and read the attribute that actually
    # exists on MoCoV2_Model (`encoder_q`, not `encoder_q_backbone`).
    import os
    if os.path.exists('test.pth'):
        checkpoint = torch.load('test.pth', map_location="cpu")
        base_encoder = checkpoint['model'].encoder_q
        y = base_encoder(x_q)
        print(y.shape)

    print(logit.shape)
    print(label.shape)
