import torch
from torch import nn
from lib.base.base_net import BaseNet
import copy
from torch.nn import functional as F
from lib.optimizer import OPTIMIZER


class Experiment1(BaseNet):
    """Few-shot model: shared backbone + per-part dimension reduction, with a
    train-time classifier and an episodically fine-tuned test-time classifier.

    Each sample consists of ``part_num`` image parts. Every part is embedded by
    the backbone, projected down by ``reduce_net``, and the reduced part
    embeddings of one sample are concatenated before classification.
    """

    def __init__(self, cfg, ):
        super().__init__(cfg)

        # Number of image parts per sample; the reduced embedding of each part
        # is concatenated, so the classifier input is (dim // part_num) * part_num.
        part_num = 3

        # Flattened output dimensionality of the backbone.
        self.dim = self.get_backbone_last_dim()

        # Projects one part's backbone feature down to dim // part_num.
        self.reduce_net = nn.Linear(self.dim, self.dim // part_num)

        # Classifier over concatenated part embeddings for the training classes.
        self.train_linear = nn.Linear((self.dim // part_num) * part_num, self.class_num)
        self.train_linear.bias.data.fill_(0)

        # Test-time classifier (n_way test classes); a copy of it is fine-tuned
        # per episode in finetune(), the original is never mutated.
        self.test_linear = nn.Linear((self.dim // part_num) * part_num, self.n_way['TEST'])
        self.test_linear.bias.data.fill_(0)

    def _embed(self, images, batch_size, part_num):
        """Embed a flat batch of part images and regroup parts per sample.

        Args:
            images: tensor of shape (batch_size * part_num, C, H, W).
            batch_size: number of samples.
            part_num: number of parts per sample.

        Returns:
            Tensor of shape (batch_size, part_num * reduced_dim) — the
            concatenation of each sample's reduced part embeddings.
        """
        feature = self.backbone(images)
        feature = feature.flatten(1, -1)
        feature = self.reduce_net(feature)
        # Regroup the parts belonging to one sample, then concatenate them.
        feature = feature.view(batch_size, part_num, -1)
        return feature.flatten(1, -1)

    def finetune(self, linear, episode, labels, type):
        """Fine-tune a copy of `linear` on the episode's support set, then
        return logits for the query set.

        Args:
            linear: classifier to copy and fine-tune (left untouched).
            episode: tensor (n_way*n_shot + n_query, part_num, C, H, W);
                     support samples first, query samples after.
            labels: support labels, shape (n_way * n_shot,), int64.
            type: split key ('TEST') used to look up n_way / n_shot.

        Returns:
            Query logits from the fine-tuned classifier copy.
        """
        n_support = self.n_way[type] * self.n_shot[type]
        support_set = episode[:n_support]
        query_set = episode[n_support:]

        # Work on a throwaway copy so the shared classifier is never mutated;
        # the optimizer only holds the copy's parameters.
        copy_linear = copy.deepcopy(linear)
        optimizer = OPTIMIZER[self.cfg.TEST.FINETUNE.OPTIMIZER](self.cfg.TEST.FINETUNE, copy_linear)

        # Flatten (samples, parts) into one batch dimension for the backbone.
        s_shape = support_set.shape
        support_set = support_set.view(s_shape[0] * s_shape[1], *s_shape[2:])

        q_shape = query_set.shape
        query_set = query_set.view(q_shape[0] * q_shape[1], *q_shape[2:])

        # NOTE(review): only copy_linear is stepped, yet backward() also
        # accumulates .grad on the backbone/reduce_net parameters, and the
        # support features are recomputed every epoch even though nothing
        # upstream of copy_linear changes (unless the backbone is stochastic,
        # e.g. dropout/BN in train mode) — confirm this is intended.
        for _ in range(self.cfg.TEST.FINETUNE.EPOCH):
            feature = self._embed(support_set, s_shape[0], s_shape[1])
            logits = copy_linear(feature)
            loss = F.cross_entropy(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        feature = self._embed(query_set, q_shape[0], q_shape[1])
        return copy_linear(feature)

    def forward(self, data, type):
        """Forward pass.

        Args:
            data: TRAIN/VALIDATE — (batch, part_num, C, H, W) image parts;
                  TEST — a batch of episodes, each
                  (n_way*n_shot + n_query, part_num, C, H, W).
            type: split key: 'TRAIN', 'VALIDATE' or anything else for TEST.

        Returns:
            Logits from train_linear (train/validate), or the concatenated
            per-episode query logits (test).
        """
        if type == 'TRAIN' or type == 'VALIDATE':
            shape = data.shape
            flat = data.view(shape[0] * shape[1], *shape[2:])
            feature = self._embed(flat, shape[0], shape[1])
            return self.train_linear(feature)

        # TEST: episodic evaluation. Support labels are 0..n_way-1, each
        # repeated n_shot times (same order as the support set layout).
        labels = torch.arange(
            self.n_way['TEST'], device=data.device
        ).repeat_interleave(self.n_shot['TEST'])

        logits_q = [
            # Fine-tune a fresh classifier copy on each episode's support set.
            self.finetune(self.test_linear, episode, labels, 'TEST')
            for episode in data
        ]
        return torch.cat(logits_q, dim=0)
