import torch
from torch import nn
from lib.backbone import BCAKBONE
from abc import abstractmethod
import copy
from lib.optimizer import OPTIMIZER
from torch.nn import functional as F
import numpy as np
import sys


# all new models should inherit this class !!!
class BaseNet(nn.Module):
    """Common base class for few-shot learning models.

    All new models should inherit this class and implement :meth:`forward`.
    It owns the backbone feature extractor, the per-split episode
    configuration (way / shot / query / batch size), optional loading of a
    pre-trained backbone checkpoint, and a test-time linear-head finetuning
    routine.
    """

    def __init__(self, cfg):
        """Build the backbone and cache episode settings from ``cfg``.

        Args:
            cfg: project config object exposing ``TRAIN`` / ``VALIDATE`` /
                ``TEST`` episode settings, ``DATASET`` class counts, and
                ``MODEL`` backbone options.
        """
        super().__init__()

        self.cfg = cfg

        # Episode configuration per split, keyed by the split name that the
        # rest of the code passes around as `type` ('TRAIN'/'VALIDATE'/'TEST').
        self.n_way = {'TRAIN': cfg.TRAIN.N_WAY, 'VALIDATE': cfg.VALIDATE.N_WAY, 'TEST': cfg.TEST.N_WAY}
        self.n_shot = {'TRAIN': cfg.TRAIN.N_SHOT, 'VALIDATE': cfg.VALIDATE.N_SHOT, 'TEST': cfg.TEST.N_SHOT}
        self.n_query = {'TRAIN': cfg.TRAIN.N_QUERY, 'VALIDATE': cfg.VALIDATE.N_QUERY, 'TEST': cfg.TEST.N_QUERY}
        self.batch_size = {'TRAIN': cfg.TRAIN.BATCH_SIZE, 'VALIDATE': cfg.VALIDATE.BATCH_SIZE,
                           'TEST': cfg.TEST.BATCH_SIZE}

        # Total number of classes across all dataset splits.
        self.class_num = (self.cfg.DATASET.TRAIN_CLASS_NUM +
                          self.cfg.DATASET.TEST_CLASS_NUM +
                          self.cfg.DATASET.VALID_CLASS_NUM)

        self.backbone = BCAKBONE[cfg.MODEL.BACKBONE](cfg)

        # Optionally warm-start the backbone from a pre-trained checkpoint.
        self.pre_train = cfg.MODEL.PRE_TRAINED_PATH
        if self.pre_train != '':
            # map_location='cpu' lets a GPU-saved checkpoint load on a
            # CPU-only host; load_state_dict then copies each tensor onto
            # the device the backbone's parameters already live on.
            self.backbone.load_state_dict(torch.load(self.pre_train, map_location='cpu'))
            print(f'the pre-trained model of {cfg.MODEL.BACKBONE} has been loaded over !')

    def get_backbone_last_dim(self):
        """Return the flattened feature dimension of the backbone output.

        Probes the backbone with a single random image of the configured
        size/channels and counts the elements of the result.
        """
        H, W = self.cfg.DATASET.IMAGE_SIZE
        channels = self.cfg.MODEL.INPUT_CHANNEL
        img = torch.randn(size=(1, channels, H, W))
        # Pure shape probe — no autograd graph needed.
        with torch.no_grad():
            dim = self.backbone(img).flatten().shape[0]
        return dim

    def finetune(self, linear, episode, labels, type):
        """Finetune a copy of ``linear`` on the support set, score the queries.

        The backbone is frozen for the duration of the call; only a deep
        copy of the linear head is trained, so neither ``linear`` nor the
        backbone is mutated.

        Args:
            linear: linear classification head to copy and finetune.
            episode: tensor of images, support set first
                (``n_way * n_shot`` images), then the query set.
            labels: support-set labels for the cross-entropy loss.
            type: split name ('TRAIN'/'VALIDATE'/'TEST'); kept (despite
                shadowing the builtin) for caller compatibility.

        Returns:
            Logits of the finetuned head on the query set.
        """
        n_support = self.n_way[type] * self.n_shot[type]
        support_set = episode[:n_support]
        query_set = episode[n_support:]

        copy_linear = copy.deepcopy(linear)
        optimizer = OPTIMIZER[self.cfg.TEST.FINETUNE.OPTIMIZER](self.cfg.TEST.FINETUNE, copy_linear)

        # Remember each parameter's trainability so we can restore it exactly;
        # a blanket re-enable would unfreeze parameters that were deliberately
        # frozen before this call.
        saved_flags = [p.requires_grad for p in self.backbone.parameters()]
        for param in self.backbone.parameters():
            param.requires_grad = False

        try:
            # The backbone is frozen, so support features are identical every
            # epoch — extract them once, outside the loop. (Assumes the
            # backbone is deterministic here, i.e. eval mode / no dropout.)
            with torch.no_grad():
                s_feature = self.backbone(support_set).flatten(1, -1)

            for _ in range(self.cfg.TEST.FINETUNE.EPOCH):
                logits = copy_linear(s_feature)
                loss = F.cross_entropy(logits, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            with torch.no_grad():
                q_feature = self.backbone(query_set).flatten(1, -1)
            q_logits = copy_linear(q_feature)
        finally:
            # Restore the original flags even if finetuning raised.
            for param, flag in zip(self.backbone.parameters(), saved_flags):
                param.requires_grad = flag

        return q_logits

    @abstractmethod
    def forward(self, s_q_set, type):
        """Compute model output for an episode; subclasses must implement."""
        pass
