import numpy as np
import torch
from torch import nn
from lib.base.base_net import BaseNet
from torch.nn import functional as F
import copy
import sys
from matplotlib import pyplot as plt


class Maml(BaseNet):
    """MAML-style meta-learner: one shared backbone plus a separate linear
    classification head per data split ('TRAIN' / 'TEST' / 'VALIDATE').

    Per episode, a deep copy of the split's network is fine-tuned on the
    support set (inner loop); the adapted weights are then loaded back into
    the shared network so the query-set forward pass records its autograd
    graph on the shared parameters, letting the caller's outer optimizer
    update them.
    """

    def __init__(self, cfg,maml_config):
        """Build the per-split heads and read the inner-loop hyperparameters.

        Args:
            cfg: project config; provides DATASET.IMAGE_SIZE and
                 MODEL.INPUT_CHANNEL (plus whatever BaseNet consumes,
                 e.g. self.backbone / self.n_way / self.n_shot / self.n_query
                 / self.batch_size — presumably set by BaseNet.__init__;
                 not visible here).
            maml_config: dict with 'update_step' (inner-loop step count) and
                 'update_lr' (inner-loop learning rate).
        """
        super().__init__(cfg)

        H, W = cfg.DATASET.IMAGE_SIZE
        channels = cfg.MODEL.INPUT_CHANNEL
        # Probe the backbone with a dummy image to discover its flattened
        # feature dimension, instead of hard-coding it per backbone/size.
        img = torch.randn(size=(1, channels, H, W))
        self.dim = self.backbone(img).flatten().shape[0]

        # During training only train_net is used, so only the backbone and
        # the TRAIN classification head receive updates.
        self.train_linear = nn.Linear(self.dim, self.n_way['TRAIN'])
        self.train_net = nn.Sequential(self.backbone,
                                       nn.Flatten(),
                                       self.train_linear)
        # TEST split: same shared backbone, its own linear head.
        self.test_linear = nn.Linear(self.dim, self.n_way['TEST'])
        self.test_net = nn.Sequential(self.backbone,
                                      nn.Flatten(),
                                      self.test_linear)
        # VALIDATE split: same shared backbone, its own linear head.
        self.validate_linear = nn.Linear(self.dim, self.n_way['VALIDATE'])
        self.validate_net = nn.Sequential(self.backbone,
                                          nn.Flatten(),
                                          self.validate_linear)

        # Split-keyed dispatch table; all three nets share one backbone
        # instance, only the heads differ.
        self.nets = {
            'TRAIN': self.train_net,
            'TEST': self.test_net,
            'VALIDATE': self.validate_net,
        }

        self.update_step = maml_config['update_step']  # inner-loop steps
        self.update_lr = maml_config['update_lr']      # inner-loop learning rate

    def finetune(self, net, support_set, labels):
        """Inner-loop adaptation: run `self.update_step` Adam steps of
        cross-entropy on the support set, mutating `net` in place.

        Args:
            net: a (deep-copied) split network to adapt.
            support_set: support images for one episode.
            labels: int64 class labels aligned with `support_set`.

        Returns:
            The adapted parameters of `net`, as a list.
        """
        optimizer = torch.optim.Adam(net.parameters(), lr=self.update_lr)

        for k in range(self.update_step):
            logits = net(support_set)

            loss = F.cross_entropy(logits, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        return list(net.parameters())

    def forward(self, s_q_set, type):
        """Meta-forward over a batch of episodes.

        Args:
            s_q_set: batch of episodes — e.g. 10*600*1*84*84 per the original
                comment; within each episode the first n_way*n_shot images
                are the support set and the remainder the query set
                (assumes classes are laid out in n_shot-sized runs — TODO
                confirm against the data loader).
            type: split key, one of 'TRAIN' / 'TEST' / 'VALIDATE'.
                (NOTE(review): shadows the builtin `type`; kept as-is since
                callers may pass it by keyword.)

        Returns:
            Query logits flattened to (batch_size * n_way * n_query, n_way).
        """
        n_way = self.n_way[type]
        n_shot = self.n_shot[type]
        n_query = self.n_query[type]

        # Support labels: n_shot consecutive examples per class, 0..n_way-1.
        labels = (torch.tensor([i for i in range(n_way) for _ in range(n_shot)])
                  .to(torch.int64).to(s_q_set.device))

        logits_q = torch.zeros(size=(self.batch_size[type], n_way * n_query, n_way)).to(s_q_set.device)

        # Snapshot the pre-adaptation weights so they can be restored after
        # every episode.
        origin_wights = copy.deepcopy(list(self.nets[type].parameters()))

        for k, episode in enumerate(s_q_set):
            # episode: 600*1*28*28 (original note)
            # support_set: first n_way*n_shot images of the episode
            support_set = episode[:n_way * n_shot, :, :, :]

            query_set = episode[n_way * n_shot:, :, :, :]
            # Fine-tune a throwaway copy so the Adam inner loop never touches
            # the shared parameters directly.
            net = copy.deepcopy(self.nets[type])

            # finetune the model
            weights = self.finetune(net, support_set, labels)

            # Load the adapted weights into the shared net (via .data, so no
            # autograd edge back to the inner loop). The parameters of
            # self.nets[type] really are modified here — that is intended:
            # the query forward pass below then records its graph on the
            # shared parameters, so the caller's outer backward deposits
            # gradients on them (first-order MAML style).
            for i, para in enumerate(self.nets[type].parameters()):
                para.data = weights[i].data

            logits_q[k] += (self.nets[type](query_set))

            # Restore the pre-adaptation weights before the next episode.
            # NOTE(review): this mutates .data before the outer backward has
            # run; ops whose backward reads the saved weight tensor will see
            # the restored values — confirm this first-order behavior is
            # intended.
            for i, para in enumerate(self.nets[type].parameters()):
                para.data = origin_wights[i].data

        # Merge batch and per-episode dims: (batch * n_way * n_query, n_way).
        logits_q = logits_q.flatten(0, 1)

        return logits_q
