import torch
from torch import nn
from lib.base.base_net import BaseNet
import copy
from torch.nn import functional as F
from lib.optimizer import OPTIMIZER


class Experiment4(BaseNet):
    """Few-shot classifier adapted per episode with negative
    (complementary-label) learning on the query set.

    Each episode element consists of ``part_num`` image parts; every part's
    backbone feature is compressed by ``reduce_net`` and the compressed parts
    are concatenated before the linear classification head.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Number of parts each episode element is split into (dim 1 of the
        # episode tensor — see the shape comments in finetune()).
        part_num = 3

        self.dim = self.get_backbone_last_dim()

        # Per-part feature compressor: dim -> dim // part_num.
        self.reduce_net = nn.Linear(self.dim, self.dim // part_num)

        # Classification heads over the concatenated compressed part features.
        self.train_linear = nn.Linear((self.dim // part_num) * part_num, self.class_num)
        self.train_linear.bias.data.fill_(0)

        self.test_linear = nn.Linear((self.dim // part_num) * part_num, self.n_way['TEST'])
        self.test_linear.bias.data.fill_(0)

    def negative_Loss(self, q_logits, comp_index):
        """Complementary-label ("negative learning") loss.

        Args:
            q_logits: (B, C) logits over the remaining candidate labels.
            comp_index: (B, 1) per-row column index of the least likely
                candidate — the class each sample should NOT belong to.

        Returns:
            Scalar loss: NLL of the complementary distribution at the
            complementary index, plus the entropy of that distribution.
        """
        # Map class probabilities to "probability of being wrong", then
        # renormalise with a second softmax so they form a distribution again.
        comp_prob = 1 - torch.softmax(q_logits, dim=-1)
        comp_prob = torch.softmax(comp_prob, dim=-1)
        log_comp_prob = torch.log(comp_prob)
        # Make the complementary class dominate the complementary distribution.
        loss1 = F.nll_loss(log_comp_prob, comp_index.flatten())
        # Entropy term: mean over the batch of -sum_c p_c * log p_c
        # (the diagonal of p @ log(p)^T gives the per-row inner products).
        loss2 = -torch.mean(torch.diag(comp_prob @ log_comp_prob.T))
        return loss1 + loss2

    def negative_learning(self, linear, query_set):
        """Label every query sample by iterated candidate elimination.

        Each round trains ``linear`` for 100 steps with the complementary
        loss, then removes per query the least likely remaining candidate.
        After ``n_way - 1`` rounds exactly one candidate survives per query
        and is returned as the prediction.

        Args:
            linear: per-episode classification head (updated in place).
            query_set: (Q, part_num, C, H, W) query images — TODO confirm
                layout against the caller's episode tensor.

        Returns:
            1-D tensor of Q predicted labels, on the query device.
        """
        optimizer = OPTIMIZER[self.cfg.TEST.FINETUNE.OPTIMIZER](self.cfg.TEST.FINETUNE, linear)

        q_shape = query_set.shape
        query_set = query_set.view(q_shape[0] * q_shape[1], q_shape[2], q_shape[3], q_shape[4])

        # Extract per-part features, compress, and concatenate the parts:
        # result is (Q, part_num * (dim // part_num)).
        feature = self.backbone(query_set)
        feature = feature.flatten(1, -1)
        feature = self.reduce_net(feature)
        feature = feature.view(q_shape[0], q_shape[1], -1)
        feature = feature.flatten(1, -1)

        # Per-query list of still-possible labels; shrinks by one each round.
        labels = [list(range(self.n_way['TEST'])) for _ in range(q_shape[0])]

        # n_way - 1 elimination rounds leave one candidate per query
        # (was hard-coded to 4, i.e. the 5-way case).
        for _ in range(self.n_way['TEST'] - 1):
            for _ in range(100):
                q_logits = linear(feature)
                # Keep only the logits of the remaining candidates per row.
                index = torch.tensor(labels).to(q_logits.device)
                q_logits = torch.gather(q_logits, 1, index)
                # Position (within the row) of the least likely candidate.
                comp_index = torch.argmin(q_logits, dim=-1, keepdim=True)
                # Its actual class id, one per query.
                comp_label = torch.gather(index, 1, comp_index).flatten()
                loss = self.negative_Loss(q_logits, comp_index)
                optimizer.zero_grad()
                loss.backward(retain_graph=True)
                optimizer.step()

            # Drop the complementary label found in the FINAL inner step from
            # each query's candidate list.  (Loop variables renamed: the
            # original inner `i` shadowed the outer round counter.)
            for row, lbl in enumerate(comp_label):
                labels[row].remove(lbl.item())

        # One surviving candidate per query -> flatten into predictions.
        return torch.tensor(labels).flatten().to(query_set.device)

    def finetune(self, linear, episode, labels, type):
        """Fine-tune a copy of ``linear`` on the support set, then predict
        query labels with negative learning.

        Args:
            linear: head to copy; the original object is never modified.
            episode: (n_way*n_shot + Q, part_num, C, H, W) episode images,
                support samples first (e.g. 100*3*3*84*84 -> 25 support +
                75 query for 5-way 5-shot).
            labels: (n_way * n_shot,) int64 support labels.
            type: split key ('TRAIN'/'TEST') selecting n_way / n_shot.

        Returns:
            1-D tensor of predicted query labels.
        """
        # Freeze the shared feature extractor during per-episode adaptation;
        # only the copied head receives gradient updates.
        for param in self.backbone.parameters():
            param.requires_grad = False
        for param in self.reduce_net.parameters():
            param.requires_grad = False

        support_set = episode[:self.n_way[type] * self.n_shot[type]]
        query_set = episode[self.n_way[type] * self.n_shot[type]:]

        # Work on a copy so the stored head keeps its initialisation
        # across episodes.
        copy_linear = copy.deepcopy(linear)
        optimizer = OPTIMIZER[self.cfg.TEST.FINETUNE.OPTIMIZER](self.cfg.TEST.FINETUNE, copy_linear)

        s_shape = support_set.shape
        support_set = support_set.view(s_shape[0] * s_shape[1], s_shape[2], s_shape[3], s_shape[4])

        # Supervised stage: standard cross-entropy on the support set.
        for _ in range(self.cfg.TEST.FINETUNE.EPOCH):
            feature = self.backbone(support_set)
            feature = feature.flatten(1, -1)
            feature = self.reduce_net(feature)
            feature = feature.view(s_shape[0], s_shape[1], -1)
            feature = feature.flatten(1, -1)
            logits = copy_linear(feature)
            loss = F.cross_entropy(logits, labels)
            optimizer.zero_grad()
            loss.backward(retain_graph=True)
            optimizer.step()

        pred_labels = self.negative_learning(copy_linear, query_set)

        # Restore gradients for any subsequent training steps.
        for param in self.backbone.parameters():
            param.requires_grad = True
        for param in self.reduce_net.parameters():
            param.requires_grad = True

        return pred_labels

    def forward(self, data, type):
        """Predict query labels for a batch of episodes.

        NOTE(review): ``type`` is ignored — the TEST configuration is
        hard-coded below; confirm this is intended before calling with
        a different split.

        Args:
            data: (episodes, n_way*n_shot + Q, part_num, C, H, W) batch.
            type: split key (currently unused, see note).

        Returns:
            1-D tensor of predicted labels for all query samples of all
            episodes, concatenated in episode order.
        """
        # Support labels shared by every episode: [0,..,0, 1,..,1, ...].
        labels = torch.tensor(
            [i for i in range(self.n_way['TEST']) for _ in range(self.n_shot['TEST'])]
        ).to(torch.int64).to(data.device)

        pred_labels = [
            self.finetune(self.test_linear, episode, labels, 'TEST')
            for episode in data
        ]

        return torch.concat(pred_labels, dim=0)
