import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import random
import torch.nn.functional as F
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import copy
from torch.autograd import Variable
from model import VanillaBert
import math
from collections import OrderedDict
from tqdm import trange

def to_var(x, requires_grad=True):
    """Return *x* wrapped as an autograd ``Variable``.

    Gradient tracking is enabled unless ``requires_grad`` is False.
    Device placement is deliberately left to the caller; the automatic
    ``.cuda()`` transfer was disabled upstream.
    """
    wrapped = Variable(x, requires_grad=requires_grad)
    return wrapped


def named_params(curr_module=None, memo=None, prefix=''):
    """Yield ``(qualified_name, parameter)`` pairs for *curr_module*.

    Modules exposing ``named_leaves()`` (meta-module convention) contribute
    their leaf tensors; plain ``nn.Module``s contribute their direct
    ``_parameters``. Children are then visited recursively. *memo* ensures
    each shared parameter is reported only once; parameters that are None
    or do not require gradients are skipped.
    """
    if memo is None:
        memo = set()

    # Prefer the meta-module hook when present, otherwise fall back to the
    # module's own parameter dict.
    if hasattr(curr_module, 'named_leaves'):
        direct = curr_module.named_leaves()
    else:
        direct = curr_module._parameters.items()

    for leaf_name, leaf in direct:
        if leaf is None or leaf in memo or not leaf.requires_grad:
            continue
        memo.add(leaf)
        yield (prefix + '.' + leaf_name if prefix else leaf_name), leaf

    # Descend into submodules, extending the dotted prefix.
    for child_name, child in curr_module.named_children():
        child_prefix = prefix + ('.' if prefix else '') + child_name
        yield from named_params(child, memo, child_prefix)


def params(model):
    """Iterate over the parameter tensors of *model*, discarding names."""
    yield from (parameter for _name, parameter in named_params(model))

import pdb
def update_params(model, lr_inner, first_order=False, source_params=None, detach=False):
    """Apply one SGD-style inner-loop update to *model* in place.

    Each parameter is replaced (via :func:`set_param`) by the expression
    ``param - lr_inner * grad``, which keeps the update differentiable for
    second-order meta-learning unless *first_order* is set.

    Args:
        model: module whose parameters are rewritten.
        lr_inner: inner-loop learning rate.
        first_order: if True, detach gradients so no second-order terms
            flow back through the update.
        source_params: optional iterable of gradient tensors, one per
            parameter, aligned with the order of ``named_params(model)``.
            If None, each parameter's own ``.grad`` is used.
        detach: only honoured when *source_params* is None — detach the
            parameters in place instead of updating them.

    Note: removed leftover ``print``/``pdb.set_trace()`` debug calls that
    blocked execution on every invocation.
    """
    if source_params is not None:
        for (name_t, param_t), grad in zip(named_params(model), source_params):
            if first_order:
                grad = to_var(grad.detach().data)
            set_param(model, name_t, param_t - lr_inner * grad)
    else:
        for name, param in named_params(model):
            if not detach:
                grad = param.grad
                if first_order:
                    grad = to_var(grad.detach().data)
                set_param(model, name, param - lr_inner * grad)
            else:
                # detach_() modifies in place; re-registering keeps the
                # module's _parameters dict consistent.
                param = param.detach_()
                set_param(model, name, param)

def set_param(curr_mod, name, param):
    """Register *param* under the dotted path *name*, descending submodules.

    Writes straight into ``_parameters`` (instead of attribute assignment)
    so the new tensor may be a non-leaf, differentiable expression.
    Unknown path components are silently ignored.
    """
    if '.' not in name:
        if hasattr(curr_mod, 'flatten_parameters'):
            # RNN modules cache flattened weights; poke _apply so the cache
            # is rebuilt around the replaced tensor.
            curr_mod._apply(lambda x: x)
        curr_mod._parameters[name] = param
        return
    head, tail = name.split('.', 1)
    for child_name, child in curr_mod.named_children():
        if child_name == head:
            set_param(child, tail, param)
            break


def detach_params(model):
    """Detach every tracked parameter of *model*, re-registering the
    gradient-free tensors in place via :func:`set_param`."""
    for param_name, param in named_params(model):
        set_param(model, param_name, param.detach())


def copy_model(model, other, same_var=False):
    # Copy all parameters of ``other`` into ``model`` via set_param.
    # NOTE(review): ``other.named_params()`` is called as a *method*; plain
    # nn.Module does not define one, so ``other`` is presumably a meta-module
    # exposing that API — confirm against callers.
    for name, param in other.named_params():
        if not same_var:
            # clone into a fresh leaf Variable so the two models share no storage
            param = to_var(param.data.clone(), requires_grad=True)
        set_param(model, name, param)

def update_module(module, new_module=None, updates=None, memo=None):
    r"""
    [[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/utils.py)

    **Description**

    Updates the parameters of a module in-place, in a way that preserves differentiability.

    The parameters of the module are swapped with their update values, according to:
    \[
    p \gets p + u,
    \]
    where \(p\) is the parameter, and \(u\) is its corresponding update.


    **Arguments**

    * **module** (Module) - The module to update.
    * **updates** (list, *optional*, default=None) - A list of gradients for each parameter
        of the model. If None, will use the tensors in .update attributes.

    **Example**
    ~~~python
    error = loss(model(X), y)
    grads = torch.autograd.grad(
        error,
        model.parameters(),
        create_graph=True,
    )
    updates = [-lr * g for g in grads]
    l2l.update_module(model, updates=updates)
    ~~~
    """
    # Clone lazily so callers may pass a pre-cloned target module.
    # NOTE(review): ``module.clone()`` presumes a learn2learn-style module —
    # plain nn.Module has no clone(); confirm against callers.
    if new_module is None:
        new_module = module.clone()

    # ``memo`` maps original tensors to their updated versions so parameters
    # shared between submodules are updated exactly once.
    if memo is None:
        memo = {}
    if updates is not None:
        params = list(new_module.parameters())
        if not len(updates) == len(list(params)):
            msg = 'WARNING:update_module(): Parameters and updates have different length. ('
            msg += str(len(params)) + ' vs ' + str(len(updates)) + ')'
            print(msg)
        # Stash each update on its parameter; consumed below and by the
        # recursive calls (which pass updates=None).
        for p, g in zip(params, updates):
            p.update = g

    # Update the params
    for param_key in module._parameters:
        p = module._parameters[param_key]
        if p is not None and hasattr(p, 'update') and p.update is not None:
            if p in memo:
                # shared parameter already updated elsewhere — reuse it
                new_module._parameters[param_key] = memo[p]
            else:
                # out-of-place add keeps the autograd graph intact
                updated = p + p.update
                memo[p] = updated
                new_module._parameters[param_key] = updated

    # Second, handle the buffers if necessary
    for buffer_key in module._buffers:
        buff = module._buffers[buffer_key]
        if buff is not None and hasattr(buff, 'update') and buff.update is not None:
            if buff in memo:
                new_module._buffers[buffer_key] = memo[buff]
            else:
                updated = buff + buff.update
                memo[buff] = updated
                new_module._buffers[buffer_key] = updated

    # Then, recurse for each submodule
    for module_key in module._modules:
        new_module._modules[module_key] = update_module(
            module._modules[module_key],
            new_module._modules[module_key],
            updates=None,
            memo=memo,
        )

    # Finally, rebuild the flattened parameters for RNNs
    # See this issue for more details:
    # https://github.com/learnables/learn2learn/issues/139
    if hasattr(module, 'flatten_parameters'):
        new_module._apply(lambda x: x)
    return new_module

def update_current_devices(model, new_device):
    """Recursively rewrite the ``device`` attribute of *model* and its
    submodules.

    Fixes the original early return for modules with no children, which
    skipped leaf modules even when they carried a ``device`` attribute.
    Recursion still stops at any module that does not expose ``device``.
    """
    if not hasattr(model, 'device'):
        return
    model.device = new_device
    for child_name in model._modules:
        update_current_devices(model._modules[child_name], new_device)

def Similarity(grad_dicts_1, grad_dicts_2):
    """Inner product between two gradient dictionaries.

    Both dicts must share the same keys; the elementwise products are summed
    per tensor and the per-key sums accumulated into one scalar tensor.
    """
    per_key = []
    for key, grad in grad_dicts_1.items():
        assert key in grad_dicts_2
        per_key.append((grad * grad_dicts_2[key]).sum())
    return torch.stack(per_key).sum()

class InstanceReweighting(nn.Module):
    """Bi-level instance reweighting for a weakly-labelled set.

    Each weak instance gets a weight; a one-step weighted inner update of
    ``model`` is evaluated on a clean few-shot set, and the few-shot loss is
    differentiated w.r.t. the weights (``ComputeGrads4Weights``).
    """

    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                 weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=20,
                 batch_size=5):
        """
        Args:
            model: wrapped classifier; must expose ``device``, ``predict``
                and ``lossAndAcc``.
            weak_set / few_shot_set: datasets supporting indexing and
                ``collate_raw_batch``.
            weak_set_label: labels associated with the weak set.
            exp_idxs: indices of weak instances promoted to the expand set.
                ``None`` stands for an empty list (fixes the shared
                mutable-default pitfall of the original ``exp_idxs=[]``).
            weak_set_weights: optional initial per-instance weights.
            convey_fn: optional dataset transformation applied to both sets.
            lr4model / scale_lr4model: inner-loop learning rates.
            coeff4expandset: loss coefficient for the expand set.
            max_few_shot_size / batch_size: batching limits.
        """
        super(InstanceReweighting, self).__init__()
        self.model = model
        self.convey_fn = convey_fn
        if convey_fn is not None:
            self.weak_set = convey_fn(weak_set)
            few_shot_set = self.convey_fn(few_shot_set)
        else:
            self.weak_set = weak_set
        self.weak_set_size = len(self.weak_set) if weak_set is not None else 0
        self.weak_set_weights = weak_set_weights
        self.weak_set_label = weak_set_label
        self.few_shot_set = few_shot_set
        self.expand_data_list = None
        # fresh list per instance (avoids the shared mutable default)
        self.expand_idxs = [] if exp_idxs is None else exp_idxs
        self.lr4model = lr4model
        self.max_few_shot_size = max_few_shot_size
        # NOTE: attribute keeps the historical triple-f spelling because
        # external code may read it.
        self.coefff4expandset = coeff4expandset
        self.scale_lr4model = scale_lr4model
        self.batch_size = batch_size
        self.device = self.model.device
        self.to(self.device)

    def ConstructExpandData(self, batch_size=100):
        """Collate the instances listed in ``self.expand_idxs`` into batches
        of at most *batch_size* and cache them in ``self.expand_data_list``."""
        if not hasattr(self, "expand_idxs"):
            print("instance does not contain expand_idxs!")
            return
        if not isinstance(self.expand_idxs, list):
            print("self.expand_idxs is not a list instance!")
            return
        if len(self.expand_idxs) == 0:
            # fixed message: this branch fires when the list IS empty
            print("self.expand_idxs is an empty list!")
            return
        self.expand_data_list = [self.weak_set.collate_raw_batch(
                [self.weak_set[idx] for idx in self.expand_idxs[i:min(i+batch_size, len(self.expand_idxs))] ]
        ) for i in range(0, len(self.expand_idxs), batch_size)]

    def SampleBatch(self, indices=None, batch_size=-1, dataset=None):
        """Collate a batch (random when *indices* is None) from *dataset*,
        defaulting to the weak set; returns the batch and its index tensor."""
        batch_size = self.batch_size if batch_size == -1 else batch_size
        source = self.weak_set if dataset is None else dataset
        if indices is None:
            indices = random.sample(list(range(len(source))), batch_size)
        items = [source[idx] for idx in indices]
        batch = source.collate_raw_batch(items)
        return batch, torch.tensor(indices, dtype=torch.int64).to(self.device)

    def ModelLoss(self, batch, model: VanillaBert):
        """Per-instance NLL of *model* on *batch* (no reduction)."""
        preds = model.predict(batch)
        # Nudge probabilities away from exact 0/1 so log() stays finite.
        # Generalized from a hard-coded 3-class shape to preds.size(1),
        # matching InstanceReweightingV2.LossList.
        epsilon = torch.ones([len(preds), preds.size(1)], dtype=torch.float32, device=model.device) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2].to(model.device)
        loss = F.nll_loss(preds.log(), labels, reduction='none')
        return loss

    def ModelGrads(self, batch, weights, model: VanillaBert, create_graph=True):
        """Gradients of the weighted loss w.r.t. *model*'s parameters; the
        graph is kept so the weights can be differentiated later."""
        loss = self.ModelLoss(batch, model)
        cost = torch.sum(loss * weights)
        grads = torch.autograd.grad(cost, params(model),
                                    create_graph=create_graph,
                                    retain_graph=True)
        return grads

    def ComputeGrads4Weights(self, step, batch, weights, tmp_model: VanillaBert, few_shot_data):
        """One bi-level step: inner-update *tmp_model* with weighted
        gradients, then differentiate the few-shot loss w.r.t. the weights.

        Returns ``(grad_weights, few_shot_loss)``.
        """
        v_weights = to_var(weights)
        model_grads = self.ModelGrads(batch, v_weights, tmp_model)
        update_params(tmp_model, self.lr4model, source_params=model_grads)
        loss, acc = tmp_model.lossAndAcc(few_shot_data)
        print('####Few Shot %3d | %3d ####, loss/acc = %6.8f/%6.7f' % (
            step, self.weak_set_size, loss, acc
        ))
        torch.cuda.empty_cache()
        grad_weights = torch.autograd.grad(loss, v_weights, only_inputs=True,
                                            create_graph=False, retain_graph=False)[0]
        torch.cuda.empty_cache()
        return grad_weights, loss

def WeightedAcc(y_true, y_pred, weights):
    """Weighted agreement score plus accuracies on two weight-based subsets.

    Returns ``(sum_i s_i * w_i, topK_acc, pos_acc)`` where ``s_i`` is +1 for
    a correct prediction and -1 otherwise, ``w`` is the L1-normalized weight
    vector, ``topK_acc`` is the accuracy over the highest-weighted half and
    ``pos_acc`` the accuracy over the positively-weighted instances.
    """
    delta = y_true - y_pred
    # +1 where the prediction matches, -1 elsewhere
    agreement = delta.__eq__(0).long() - delta.__ne__(0).long()
    if all([w == 0 for w in weights]):
        # NOTE(review): in-place += mutates the caller's tensor — confirm
        # callers do not rely on the original values.
        weights += 1
    weights = weights / weights.abs().sum()
    order = weights.argsort()
    half = len(weights) // 2
    top_half_idx = order[-half:]
    positive_cnt = (weights > 0).sum()
    # NOTE(review): when positive_cnt == 0 the slice [-0:] selects *all*
    # indices — presumably unintended; confirm.
    positive_idx = order[-positive_cnt:]
    topK_acc = accuracy_score(y_true[top_half_idx].cpu(), y_pred[top_half_idx].cpu())
    pos_acc = accuracy_score(y_true[positive_idx].cpu(), y_pred[positive_idx].cpu())
    return (agreement * weights).sum(), topK_acc, pos_acc

class InstanceReweightingV2(nn.Module):
    """Gradient-similarity instance reweighting.

    Instead of differentiating through an inner-loop update (as
    ``InstanceReweighting`` does), each weak instance is scored by the
    similarity of its per-instance loss gradient to the (optionally
    GSNR-clipped) mean gradient on the clean few-shot set.
    """

    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                 weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                 batch_size=5):
        """See ``InstanceReweighting`` for the shared arguments; additionally
        builds an SGD optimizer over the wrapped model's parameters.
        ``exp_idxs=None`` now stands for an empty list (fixes the shared
        mutable-default pitfall of the original ``exp_idxs=[]``)."""
        super(InstanceReweightingV2, self).__init__()
        self.model = model
        self.convey_fn = convey_fn
        if convey_fn is not None:
            self.weak_set = convey_fn(weak_set)
            few_shot_set = self.convey_fn(few_shot_set)
        else:
            self.weak_set = weak_set
        self.weak_set_size = len(self.weak_set) if weak_set is not None else 0
        self.weak_set_weights = weak_set_weights
        self.weak_set_label = weak_set_label
        self.few_shot_set = few_shot_set
        self.expand_data_list = None
        # fresh list per instance (avoids the shared mutable default)
        self.expand_idxs = [] if exp_idxs is None else exp_idxs
        self.lr4model = lr4model
        # NOTE: attribute keeps the historical triple-f spelling because
        # external code may read it.
        self.coefff4expandset = coeff4expandset
        self.model_optim = torch.optim.SGD([
            {'params': self.model.parameters(), 'lr': self.lr4model}
        ])
        self.batch_size = batch_size
        self.max_few_shot_size = max_few_shot_size
        self.device = self.model.device
        self.to(self.device)

    def ConstructExpandData(self, batch_size=100):
        """Collate the instances listed in ``self.expand_idxs`` into batches
        of at most *batch_size* and cache them in ``self.expand_data_list``."""
        if not hasattr(self, "expand_idxs"):
            print("instance does not contain expand_idxs!")
            return
        if not isinstance(self.expand_idxs, list):
            print("self.expand_idxs is not a list instance!")
            return
        if len(self.expand_idxs) == 0:
            # fixed message: this branch fires when the list IS empty
            print("self.expand_idxs is an empty list!")
            return
        self.expand_data_list = [self.weak_set.collate_raw_batch(
                [self.weak_set[idx] for idx in self.expand_idxs[i:min(i+batch_size, len(self.expand_idxs))] ]
        ) for i in range(0, len(self.expand_idxs), batch_size)]

    def InnerBatch(self, indices=None, batch_size=-1, dataset=None, device=None):
        """Sample (or gather) a collated batch; returns it with its index
        tensor on *device* (default: the model's device)."""
        batch_size = self.batch_size if batch_size == -1 else batch_size
        if dataset is None:
            dataset = self.weak_set
        if indices is None:
            indices = random.sample(list(range(len(dataset))), batch_size)
        items = [dataset[idx] for idx in indices]
        batch = dataset.collate_raw_batch(items)
        device = self.device if device is None else device
        return batch, torch.tensor(indices, dtype=torch.int64, device=device)

    def LossList(self, batch):
        """Per-instance NLL of the wrapped model on *batch* (no reduction)."""
        preds = self.model.predict(batch)
        # keep probabilities away from exact 0/1 so log() stays finite
        epsilon = torch.ones([len(preds), preds.size(1)],
                             dtype=torch.float32, device=self.device) * 1e-8
        preds = (preds - epsilon).abs()
        # NOTE(review): labels come from batch[3] here but from batch[-2]
        # elsewhere in this file — confirm against the collate layout.
        # reduction='none' replaces the deprecated reduce=False.
        loss = F.nll_loss(preds.log(), batch[3].to(self.device), reduction='none')
        return loss

    def GradientsSim(self, batch, mean_grad_dict):
        """Similarity of each instance's gradient to *mean_grad_dict*.

        Backpropagates one masked instance loss at a time, retaining the
        graph until the last instance.
        """
        loss = self.LossList(batch)
        labels = batch[3].to(self.device)
        mask_tensor = torch.eye(len(labels), device=self.device)
        grad_sim = []
        for i in range(len(labels)):
            self.model.zero_grad()
            retain_graph = True if i != (len(labels) - 1) else False
            (mask_tensor[i] * loss).sum().backward(retain_graph = retain_graph)
            grad_dict = {n: p.grad.clone() for n, p in self.model.named_parameters()}
            grad_sim.append(Similarity(grad_dict, mean_grad_dict))
        return grad_sim

    def meanGradOnValSet(self, few_shot_data=None, few_shot_data_list=None):
        """Mean gradient of the few-shot loss, plus the loss and accuracy.

        With a list of batches the gradients accumulate across backward
        calls and are divided by the number of batches.
        """
        self.model.zero_grad()
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            loss, acc = self.model.lossAndAcc(few_shot_data)
            loss.backward()
            grad_dicts = {n: p.grad.clone() for n, p in self.model.named_parameters()}
            loss = loss.data.item()
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                f_loss, f_acc = self.model.lossAndAcc(few_data)
                f_loss.backward()  # gradients accumulate across batches
                f_loss_list.append(f_loss.data.item())
                f_acc_list.append(f_acc.item())
                torch.cuda.empty_cache()
            loss, acc = np.mean(f_loss_list), np.mean(f_acc_list)
            # i is the last loop index, so (i + 1) == number of batches
            grad_dicts = {n: p.grad.clone()/(i+1) for n, p in self.model.named_parameters()}
        print('####Few Shot ####, loss/acc = %6.8f/%6.7f' % (loss, acc))
        return grad_dicts, loss, acc

    def SumUpGradDelta(self, grad_dic, mean_grad_dic, GradStdSum):
        """Add the squared deviation of one gradient sample from the mean
        gradient into the running accumulator *GradStdSum*."""
        keys = list(grad_dic.keys())
        grad_delta = {name:(grad_dic[name]-mean_grad_dic[name]).pow(2) for name in keys}
        if len(GradStdSum) == 0: # empty dict: start the accumulator
            return grad_delta
        GradStdSum = {name:GradStdSum[name]+grad_delta[name] for name in keys}
        return GradStdSum

    def GradStdSum(self, batch, mean_grad_dic):
        """Sum over *batch* of squared per-instance gradient deviations from
        *mean_grad_dic* (one backward pass per instance)."""
        labels = batch[3].to(self.device)
        loss = self.LossList(batch)
        GradStdSum = {}
        mask_tensor = torch.eye(len(labels), device=self.device)
        for i in range(len(labels)):
            self.model.zero_grad()
            retain_graph = True if i != (len(labels) - 1) else False
            (mask_tensor[i] * loss).sum().backward(retain_graph = retain_graph)
            grad_dict = {n: p.grad.clone() for n, p in self.model.named_parameters()}
            GradStdSum = self.SumUpGradDelta(grad_dict, mean_grad_dic, GradStdSum)
        return GradStdSum

    def GSNRGradOnValSet(self, few_shot_data=None, few_shot_data_list=None):
        """Gradient signal-to-noise ratio (mean² / deviation-sum) per tensor.

        Returns (GSNR dict, mean-grad dict, std-sum dict, loss, accuracy).
        """
        mean_grad_dict, f_loss, f_acc = self.meanGradOnValSet(few_shot_data, few_shot_data_list)
        keys = list(mean_grad_dict.keys())
        if few_shot_data_list is None:
            GradStdSum = self.GradStdSum(few_shot_data, mean_grad_dict)
        else:
            print("-------> few shot data list ------>")
            GradStdSum = {}
            # NOTE(review): the first batch of the list is skipped here —
            # confirm this is intended rather than an off-by-one.
            for i, few_data in enumerate(few_shot_data_list[1:]):
                tmp = self.GradStdSum(few_data, mean_grad_dict)
                GradStdSum = tmp if len(GradStdSum) == 0 else {name:GradStdSum[name]+tmp[name] for name in keys}
        # normalized by the full few-shot set size, not the sample count
        GradStdSum = {name:GradStdSum[name]*1.0/len(self.few_shot_set) for name in keys}
        GSNR_dict = {name:mean_grad_dict[name].pow(2)/GradStdSum[name] for name in keys}
        return GSNR_dict, mean_grad_dict, GradStdSum, f_loss, f_acc

    def ClipValGradient(self, few_shot_data=None, few_shot_data_list=None, topk_ratio=0.1):
        """Keep only large entries of the mean validation gradient; cache the
        clipped gradients in ``self.val_grad_dicts``."""
        GSNR_Dict, mean_grad_dict, _, f_loss, f_acc = self.GSNRGradOnValSet(few_shot_data, few_shot_data_list)
        gsnr_list = torch.cat([GSNR_Dict[name].reshape([-1])
                               for name in list(GSNR_Dict.keys()) if not "embedding" in name])
        gsnr_list = gsnr_list[~torch.isnan(gsnr_list)]
        threshold = gsnr_list.topk(int(len(gsnr_list)*topk_ratio))[0].min()
        # NOTE(review): the threshold is derived from GSNR values but is
        # compared against raw mean gradients below — confirm intended.
        self.val_grad_dicts = {name:torch.where(mean_grad_dict[name]>threshold,
                                                mean_grad_dict[name],
                                                torch.full_like(mean_grad_dict[name], 0))
                               for name in list(mean_grad_dict.keys())}
        return f_loss, f_acc

    def ClipValGradientV2(self, few_shot_data=None, few_shot_data_list=None, topk_ratio=0.5):
        """Per-tensor GSNR clipping: zero out whole tensors whose aggregate
        GSNR exceeds the top-k threshold, keep the rest."""
        _, mean_grad_dict, std_grad_dict, f_loss, f_acc = self.GSNRGradOnValSet(few_shot_data, few_shot_data_list)
        GSNR_Dict = {name:mean_grad_dict[name].pow(2).sum()/std_grad_dict[name].sum()
                                                for name in list(mean_grad_dict.keys())}
        gsnr_list = torch.stack(list(GSNR_Dict.values()))
        threshold = gsnr_list.topk(int(len(gsnr_list)*topk_ratio))[0].min()
        self.val_grad_dicts = {name: torch.full_like(mean_grad_dict[name], 0) if GSNR_Dict[name] > threshold \
                                        else mean_grad_dict[name]
                               for name in list(mean_grad_dict.keys())}
        return f_loss, f_acc

    def ClipValGradientV3(self, few_shot_data=None, few_shot_data_list=None, topk_ratio=0.1):
        """Layer-level GSNR clipping: aggregate numerator/denominator per
        layer (parameter name minus its final component) before
        thresholding, then zero the gradients of high-GSNR layers."""
        _, mean_grad_dict, std_grad_dict, f_loss, f_acc = self.GSNRGradOnValSet(few_shot_data, few_shot_data_list)
        GSNR_Dict_frac_1 = {}
        GSNR_Dict_frac_2 = {}
        for name in list(mean_grad_dict.keys()):
            layer_name, _ = name.rsplit(".", 1)
            if layer_name in GSNR_Dict_frac_1:
                GSNR_Dict_frac_1[layer_name] = mean_grad_dict[name].pow(2).sum() + GSNR_Dict_frac_1[layer_name]
                GSNR_Dict_frac_2[layer_name] = std_grad_dict[name].sum() + GSNR_Dict_frac_2[layer_name]
            else:
                GSNR_Dict_frac_1[layer_name] = mean_grad_dict[name].pow(2).sum()
                GSNR_Dict_frac_2[layer_name] = std_grad_dict[name].sum()
        GSNR_Dict = {layer_name:GSNR_Dict_frac_1[layer_name]/GSNR_Dict_frac_2[layer_name]
                        for layer_name in list(GSNR_Dict_frac_2.keys())}
        gsnr_list = torch.stack(list(GSNR_Dict.values()))
        threshold = gsnr_list.topk(math.ceil(len(gsnr_list)*topk_ratio))[0].min()
        self.val_grad_dicts = {name: torch.full_like(mean_grad_dict[name], 0) if GSNR_Dict[name.rsplit(".", 1)[0]] > threshold \
                                        else mean_grad_dict[name]
                               for name in list(mean_grad_dict.keys())}
        return f_loss, f_acc

    def ComputeGrads4Weights(self, batch, few_shot_data=None,
                                            few_shot_data_list=None):
        """Per-instance gradient-similarity scores for *batch*; the
        validation gradient is computed lazily on first use and cached."""
        assert few_shot_data is not None or few_shot_data_list is not None
        if not hasattr(self, "val_grad_dicts") or self.val_grad_dicts is None:
            self.val_grad_dicts, few_loss, few_acc = self.meanGradOnValSet(few_shot_data=few_shot_data,
                                                                          few_shot_data_list=few_shot_data_list)
        sim_list = self.GradientsSim(batch, self.val_grad_dicts)
        return torch.tensor(sim_list, device=self.device)

class Data_Loss_Grad_Utils:
    """Model-agnostic helpers shared by the reweighting strategies: batching,
    per-instance losses, gradient similarity and mean validation gradients."""

    def __init__(self, class_num=2, lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                 Inner_BatchSize=5):
        """Store hyper-parameters and pick ``cuda:0`` when available."""
        self.expand_data_list = None
        self.lr4model = lr4model
        # NOTE: attribute keeps the historical triple-f spelling because
        # external code may read it.
        self.coefff4expandset = coeff4expandset
        self.batch_size = Inner_BatchSize
        self.max_few_shot_size = max_few_shot_size
        self.device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
        self.class_num = class_num

    def FewShotDataList(self, few_shot_set):
        """Collate the few-shot set either as one batch or as a list of
        batches capped at ``max_few_shot_size``; caches uniform label
        weights in ``self.f_label_weight``.

        Returns ``(few_shot_data, few_shot_data_list)`` — exactly one of the
        two is not None.
        """
        # All classes share the averaged weight, so summing the weighted
        # per-batch losses reproduces the mean loss over the whole set —
        # no extra discounting is needed when accumulating.
        self.f_label_weight = torch.tensor([1.0 / len(few_shot_set) for _ in range(self.class_num)],
                                           device=self.device)
        if len(few_shot_set) > self.max_few_shot_size:
            few_shot_data = None
            few_shot_data_list = [few_shot_set.collate_raw_batch(
                                            [few_shot_set[j] for j in range(i,
                                                                                 min(i+self.max_few_shot_size,
                                                                                     len(few_shot_set)))])
                                            for i in range(0, len(few_shot_set), self.max_few_shot_size)]
        else:
            few_shot_data = few_shot_set.collate_raw_batch(
                [few_shot_set[i] for i in range(len(few_shot_set))]
            )
            few_shot_data_list = None
        return few_shot_data, few_shot_data_list

    def InnerBatch(self, dataset, indices=None, batch_size=-1, device=None):
        """Sample (or gather) a collated batch from *dataset*; returns the
        batch and its index tensor on *device* (default: ``self.device``)."""
        batch_size = self.batch_size if batch_size == -1 else batch_size
        if indices is None:
            indices = random.sample(list(range(len(dataset))), batch_size)
        items = [dataset[idx] for idx in indices]
        batch = dataset.collate_raw_batch(items)
        device = self.device if device is None else device
        return batch, torch.tensor(indices, dtype=torch.int64, device=device)

    def LossList(self, model, batch):
        """Per-instance NLL of *model* on *batch* (no reduction)."""
        preds = model.predict(batch)
        # ------------------------------------------------------------#
        # to prevent extremely confident prediction, i.e., [1.0, 0.0],
        # as it can lead to a 'nan' value in log operation
        epsilon = torch.ones([len(preds), self.class_num], dtype=torch.float32, device=self.device) * 1e-8
        preds = (preds - epsilon).abs()
        # -------------------------------------------------------------#
        # reduction='none' replaces the deprecated reduce=False
        loss = F.nll_loss(preds.log(), batch[-2].to(self.device), reduction='none')
        return loss

    def GradientsSim(self, model, batch, mean_grad_dict):
        """Similarity of each instance's gradient with *mean_grad_dict*.

        Backpropagates one masked instance loss at a time; the graph is
        retained until the final instance.
        """
        loss = self.LossList(model, batch)
        labels = batch[-2].to(self.device)
        mask_tensor = torch.eye(len(labels), device=self.device)
        grad_sim = []
        for i in range(len(labels)):
            model.zero_grad()
            retain_graph = True if i != (len(labels) - 1) else False
            (mask_tensor[i] * loss).sum().backward(retain_graph=retain_graph)
            grad_dict = {n: p.grad.clone() for n, p in model.named_parameters()}
            grad_sim.append(Similarity(grad_dict, mean_grad_dict))
        return grad_sim

    def accuracy_score(self, label_tensor: torch.Tensor, pred_tensor: torch.Tensor):
        """Fraction of positions where the two 1-D tensors agree (a float)."""
        assert len(label_tensor) == len(pred_tensor)
        assert label_tensor.dim() == 1
        assert pred_tensor.dim() == 1
        if label_tensor.device != pred_tensor.device:
            label_tensor = label_tensor.to(pred_tensor.device)
        acc_tensor = (label_tensor - pred_tensor).__eq__(0).float().sum() / len(label_tensor)
        return acc_tensor.data.item()

    def lossAndAcc(self, model, batch, label_weight=None, reduction=None):
        """Weighted NLL loss and accuracy of *model* on *batch*.

        ``reduction=None`` now falls back to ``'mean'`` — previously it was
        forwarded verbatim to ``F.nll_loss``, which rejects None.
        """
        reduction = 'mean' if reduction is None else reduction
        preds = model.predict(batch)
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()  # avoid log(0) -> nan on saturated predictions
        labels = batch[-2]
        # one-hot labels are reduced to class indices
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels.to(preds.device), weight=label_weight, reduction=reduction)
        acc = self.accuracy_score(labels, preds.data.argmax(dim=1))
        return loss, acc

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Gradients of the (weighted, summed) few-shot loss accumulated over
        one batch or a list of batches; returns (grad dict, loss, acc)."""
        model.zero_grad()
        assert few_shot_data is not None or few_shot_data_list is not None
        assert hasattr(self, "f_label_weight")
        if few_shot_data_list is None:
            loss, acc = self.lossAndAcc(model, few_shot_data, label_weight=self.f_label_weight, reduction='sum')
            loss.backward()
            loss = loss.data.item()
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                f_loss, f_acc = self.lossAndAcc(model, few_data, label_weight=self.f_label_weight, reduction='sum')
                f_loss.backward()  # gradients accumulate across batches
                f_loss_list.append(f_loss.data.item())
                f_acc_list.append(f_acc)
                torch.cuda.empty_cache()
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

class InstanceReweightingV3(Data_Loss_Grad_Utils):
    """Gradient-similarity instance reweighting built on the shared
    data/loss/grad utilities."""

    def __init__(self, class_num=2, lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                 Inner_BatchSize=5):
        """Forward all configuration to the Data_Loss_Grad_Utils base."""
        super(InstanceReweightingV3, self).__init__(
            class_num, lr4model, coeff4expandset, max_few_shot_size, Inner_BatchSize)

    def weightsLR(self, eta=0.01, grad_weights=None):
        """Scale *eta* by the inverse mean |gradient| to obtain the learning
        rate used for the instance weights."""
        return eta / grad_weights.abs().mean()

    def ComputeGrads4Weights(self, model, batch, few_shot_data=None,
                                            few_shot_data_list=None):
        """Similarity of each instance gradient in *batch* to the cached
        validation gradient (computed lazily on first call)."""
        assert few_shot_data is not None or few_shot_data_list is not None
        cache_missing = not hasattr(self, "val_grad_dicts") or self.val_grad_dicts is None
        if cache_missing:
            self.val_grad_dicts, _few_loss, _few_acc = self.meanGradOnValSet(
                model, few_shot_data=few_shot_data, few_shot_data_list=few_shot_data_list)
        sims = self.GradientsSim(model, batch, self.val_grad_dicts)
        return torch.tensor(sims, device=self.device)

class InstanceReweightingV4(InstanceReweightingV3):
    # Finite-difference variant: instead of one backward pass per instance,
    # each instance's score is the numerical directional derivative of its
    # loss along the cached validation-gradient direction.
    def __init__(self, class_num=2, lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                 Inner_BatchSize=5):
        super(InstanceReweightingV4, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size,
                                                    Inner_BatchSize)

    def ComputeGrads4Weights(self, model:nn.Module, batch, few_shot_data=None,
                                            few_shot_data_list=None):
        """Central finite-difference estimate, per instance, of
        d(loss_i)/d(direction) along ``self.val_grad_dicts``:
        ``(L(theta + eps*v) - L(theta - eps*v)) / (2*eps)``.
        """
        assert few_shot_data is not None or few_shot_data_list is not None
        if not hasattr(self, "val_grad_dicts") or self.val_grad_dicts is None:
            self.val_grad_dicts, few_loss, few_acc = self.meanGradOnValSet(model, few_shot_data=few_shot_data,
                                                                          few_shot_data_list=few_shot_data_list)
        if not hasattr(self, "epsilon"):
            self.epsilon = 1e-5  # finite-difference step size

        # theta <- theta + eps * v
        # NOTE(review): assumes every named parameter has an entry in
        # val_grad_dicts — confirm there are no frozen parameters.
        for name, par in model.named_parameters():
            par.data = par.data + (self.epsilon) * (self.val_grad_dicts[name])
        with torch.no_grad():
            loss_1 = self.LossList(model, batch)

        # theta <- theta - 2*eps * v (now at theta - eps * v)
        for name, par in model.named_parameters():
            par.data = par.data - 2* self.epsilon * self.val_grad_dicts[name]
        with torch.no_grad():
            loss_2 = self.LossList(model, batch)

        # theta <- theta + eps * v (restore originals, up to float error)
        for name, par in model.named_parameters():
            par.data = par.data + self.epsilon * self.val_grad_dicts[name]

        grad = (loss_1 - loss_2)/(2*self.epsilon)
        return grad

class GSNRInstanceReweighting(InstanceReweightingV3):
    """Instance reweighting driven by the Gradient Signal-to-Noise Ratio
    (GSNR) of the validation-set gradient.

    GSNR per entry is mean_grad**2 divided by the (averaged) sum of squared
    per-example deviations. The Clip* variants use it to filter the
    validation gradient cached in ``self.val_grad_dicts``, which is consumed
    by the parent class (e.g. ComputeGrads4Weights).
    """

    def __init__(self, class_num=2, lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                 Inner_BatchSize=5):
        super(GSNRInstanceReweighting, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size,
                                                      Inner_BatchSize)

    def SumUpGradDelta(self, grad_dic, mean_grad_dic, GradStdSum):
        """Accumulate (grad - mean_grad)**2, per parameter, into GradStdSum."""
        keys = list(grad_dic.keys())
        grad_delta = {name: (grad_dic[name] - mean_grad_dic[name]).pow(2) for name in keys}
        if len(GradStdSum) == 0:  # empty dict: first contribution, nothing to add onto
            return grad_delta
        GradStdSum = {name: GradStdSum[name] + grad_delta[name] for name in keys}
        return GradStdSum

    def GradStdSum(self, model, batch, mean_grad_dic):
        """Sum, over the batch, of squared deviations of each example's
        gradient from ``mean_grad_dic``, via one masked backward per example."""
        labels = batch[-2].to(self.device)
        loss = self.LossList(model, batch)
        GradStdSum = {}
        # Row i of the identity selects example i's loss term for backward.
        mask_tensor = torch.eye(len(labels), device=self.device)
        for i in range(len(labels)):
            model.zero_grad()
            # Free the autograd graph only on the last example's backward pass.
            retain_graph = True if i != (len(labels) - 1) else False
            (mask_tensor[i] * loss).sum().backward(retain_graph=retain_graph)
            # assumes every parameter received a gradient (p.grad is not None) -- TODO confirm
            grad_dict = {n: p.grad.clone() for n, p in model.named_parameters()}
            GradStdSum = self.SumUpGradDelta(grad_dict, mean_grad_dic, GradStdSum)
        return GradStdSum

    def GSNRGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Return (GSNR_dict, mean_grad_dict, GradStdSum, few_loss, few_acc)
        computed over the validation (few-shot) data."""
        mean_grad_dict, f_loss, f_acc = self.meanGradOnValSet(model, few_shot_data, few_shot_data_list)
        keys = list(mean_grad_dict.keys())
        if few_shot_data_list is None:
            GradStdSum = self.GradStdSum(model, few_shot_data, mean_grad_dict)
        else:
            print("-------> few shot data list ------>")
            GradStdSum = {}
            # NOTE(review): the first batch is skipped here (original comment
            # claimed it is covered by meanGradOnValSet above), yet ValSetSize
            # below counts every batch -- confirm the std-sum is not
            # under-counted for the list branch.
            for i, few_data in enumerate(
                    few_shot_data_list[1:]):  # the first batch is computed in the first line of this function
                tmp = self.GradStdSum(model, few_data, mean_grad_dict)
                GradStdSum = tmp if len(GradStdSum) == 0 else {name: GradStdSum[name] + tmp[name] for name in keys}
        ValSetSize = len(few_shot_data[-2]) if few_shot_data is not None else \
            sum([len(batch[-2]) for batch in few_shot_data_list])
        # batch[-2] is the label tensor, and len(batch[-2]) is the size of the batch

        GradStdSum = {name: GradStdSum[name] * 1.0 / ValSetSize for name in keys}
        # Elementwise GSNR; zero-deviation entries yield inf/NaN, which the
        # Clip* callers filter or treat as failing the threshold.
        GSNR_dict = {name: mean_grad_dict[name].pow(2) / GradStdSum[name] for name in keys}
        return GSNR_dict, mean_grad_dict, GradStdSum, f_loss, f_acc

    def ClipValGradient(self, model, few_shot_data=None, few_shot_data_list=None, topk_ratio=0.1):
        """Cache a clipped validation gradient in ``self.val_grad_dicts``,
        thresholded elementwise; embedding parameters are excluded from the
        threshold computation. Returns (few_shot_loss, few_shot_acc)."""
        GSNR_Dict, mean_grad_dict, _, f_loss, f_acc = self.GSNRGradOnValSet(model, few_shot_data, few_shot_data_list)
        gsnr_list = torch.cat([GSNR_Dict[name].reshape([-1])
                               for name in list(GSNR_Dict.keys()) if not "embedding" in name])
        gsnr_list = gsnr_list[~torch.isnan(gsnr_list)]
        # NOTE(review): int() can give k == 0 for tiny lists, making topk(0)
        # then min() fail on an empty tensor; V3 below uses math.ceil instead.
        threshold = gsnr_list.topk(int(len(gsnr_list)*topk_ratio))[0].min()
        # NOTE(review): the GSNR-derived threshold is compared against the raw
        # mean-gradient values (not their GSNR), which also zeroes every
        # negative gradient entry -- confirm this is intended.
        self.val_grad_dicts = {name:torch.where(mean_grad_dict[name]>threshold,
                                                mean_grad_dict[name],
                                                torch.full_like(mean_grad_dict[name], 0))
                               for name in list(mean_grad_dict.keys())}
        return f_loss, f_acc

    def ClipValGradientV2(self, model, few_shot_data=None, few_shot_data_list=None, topk_ratio=0.5):
        """Per-parameter-tensor variant: a tensor's whole gradient is zeroed
        when its aggregate GSNR exceeds the top-``topk_ratio`` threshold.

        NOTE(review): this KEEPS low-GSNR tensors, the opposite selection
        direction of ClipValGradient above -- confirm which is intended.
        """
        _, mean_grad_dict, std_grad_dict, f_loss, f_acc = self.GSNRGradOnValSet(model, few_shot_data, few_shot_data_list)
        GSNR_Dict = {name:mean_grad_dict[name].pow(2).sum()/std_grad_dict[name].sum()
                                                for name in list(mean_grad_dict.keys())}
        gsnr_list = torch.stack(list(GSNR_Dict.values()))
        threshold = gsnr_list.topk(int(len(gsnr_list)*topk_ratio))[0].min()
        self.val_grad_dicts = {name: torch.full_like(mean_grad_dict[name], 0) if GSNR_Dict[name] > threshold \
                                        else mean_grad_dict[name]
                               for name in list(mean_grad_dict.keys())}
        return f_loss, f_acc

    def ClipValGradientV3(self, model, few_shot_data=None, few_shot_data_list=None, topk_ratio=0.1):
        """Per-layer variant: GSNR is aggregated over each layer (parameter
        name minus its last dotted component) and high-GSNR layers are zeroed,
        like V2 but at layer granularity."""
        _, mean_grad_dict, std_grad_dict, f_loss, f_acc = self.GSNRGradOnValSet(model, few_shot_data, few_shot_data_list)
        GSNR_Dict_frac_1 = {}  # per-layer sum of squared mean gradients (numerator)
        GSNR_Dict_frac_2 = {}  # per-layer sum of squared deviations (denominator)
        for name in list(mean_grad_dict.keys()):
            layer_name, _ = name.rsplit(".", 1)
            if layer_name in GSNR_Dict_frac_1:
                GSNR_Dict_frac_1[layer_name] = mean_grad_dict[name].pow(2).sum() + GSNR_Dict_frac_1[layer_name]
                GSNR_Dict_frac_2[layer_name] = std_grad_dict[name].sum() + GSNR_Dict_frac_2[layer_name]
            else:
                GSNR_Dict_frac_1[layer_name] = mean_grad_dict[name].pow(2).sum()
                GSNR_Dict_frac_2[layer_name] = std_grad_dict[name].sum()
        GSNR_Dict = {layer_name:GSNR_Dict_frac_1[layer_name]/GSNR_Dict_frac_2[layer_name]
                        for layer_name in list(GSNR_Dict_frac_2.keys())}
        gsnr_list = torch.stack(list(GSNR_Dict.values()))
        # ceil keeps k >= 1, so topk/min never see an empty tensor.
        threshold = gsnr_list.topk(math.ceil(len(gsnr_list)*topk_ratio))[0].min()
        self.val_grad_dicts = {name: torch.full_like(mean_grad_dict[name], 0) if GSNR_Dict[name.rsplit(".", 1)[0]] > threshold \
                                        else mean_grad_dict[name]
                               for name in list(mean_grad_dict.keys())}
        return f_loss, f_acc

class MetaEvaluatorV3(GSNRInstanceReweighting):
    """Meta-evaluator over a weakly labelled set.

    Alternates (a) one weighted training pass over the weak set with (b) a
    finite-difference update of the per-instance weights against the
    GSNR-clipped validation gradient cached by ClipValGradientV2.
    """

    def __init__(self, lr4model=2e-5, coeff4expandset=1.0, max_few_shot_size=20,
                    class_num=3, Inner_BatchSize=5):
        super(MetaEvaluatorV3, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size, Inner_BatchSize)

    def ExpandValidIdxs(self, indices, valid_idxs, expand_idxs=None, pop_ratio=0.5):
        """Append the ``pop_ratio`` fraction of this batch with the highest
        instance weights to ``valid_idxs``; ``expand_idxs`` is passed through
        unchanged.

        ``expand_idxs`` defaults to None instead of a literal [] to avoid the
        shared-mutable-default pitfall.
        """
        if expand_idxs is None:
            expand_idxs = []
        popOUT_v = int(self.batch_size * pop_ratio)
        # Ascending sort: the last popOUT_v positions hold the largest weights.
        _, idxs = self.weak_set_weights[indices].sort()
        valid_idxs.extend(indices[idxs[-popOUT_v:]].tolist())
        return expand_idxs, valid_idxs

    def PopOut(self, model, model_optim, valid_set, weak_set, max_meta_step=5, pop_ratio=0.5, topk_ratio=0.5):
        """Run ``max_meta_step`` meta epochs and return the weak-set indices
        judged valid (the highest-weighted items of each batch)."""
        expand_idxs, valid_idxs = [], []
        c_idxs = list(range(len(weak_set)))
        self.weak_set_size = len(c_idxs)
        few_shot_data, few_shot_data_list = self.FewShotDataList(valid_set)
        self.weak_set_weights = torch.zeros(len(weak_set), device=self.device)
        self.weight_grads = torch.zeros(len(weak_set), device=self.device)
        # Doubled so a slice running past len(c_idxs) still fills a batch.
        shuffled_indices = random.sample(c_idxs,
                                         len(c_idxs)) * 2
        # BUGFIX: state_dict() returns references to the live parameter
        # tensors; without cloning, the "snapshot" silently tracks
        # model_optim.step() and load_state_dict() below becomes a no-op.
        # (Evaluate below already cloned; PopOut did not.)
        init_state_dicts = model.state_dict()
        init_state_dicts = OrderedDict((key, init_state_dicts[key].clone()) for key in init_state_dicts)
        self.ClipValGradientV2(model, few_shot_data, few_shot_data_list, topk_ratio=topk_ratio)
        for epoch in range(max_meta_step):
            for step in trange(0, len(c_idxs), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.InnerBatch(weak_set, indices, device=self.device)
                grads = self.ComputeGrads4Weights(model, batch, few_shot_data, few_shot_data_list)
                self.weight_grads[indices] = grads
                expand_idxs, valid_idxs = self.ExpandValidIdxs(indices, valid_idxs,
                                                               expand_idxs=expand_idxs,
                                                               pop_ratio=pop_ratio)
            # Scale-invariant step size for the weight ascent.
            eta = self.weightsLR(0.05, self.weight_grads)
            self.weak_set_weights = self.weak_set_weights + eta*self.weight_grads
            if epoch + 1 != max_meta_step:
                # Rewind the model, then retrain it under the updated weights.
                model.load_state_dict(init_state_dicts)
                model.zero_grad()
                for i in range(0, len(c_idxs), self.batch_size):
                    indices = c_idxs[i:min(len(c_idxs), i+self.batch_size)]
                    batch, indices = self.InnerBatch(weak_set, indices, device=self.device)
                    loss = self.LossList(model, batch)
                    sum_loss = (self.weak_set_weights[indices]*loss).sum()
                    sum_loss.backward()
                model_optim.step()
        return valid_idxs

    def Evaluate(self, model, model_optim, valid_set, weak_set, weight_eta, max_meta_step=5, GSNR_topK=0.5):
        """Learn ``self.weak_set_weights`` via softmax-parameterised instance
        weights; the weights from the epoch with the best few-shot accuracy
        are kept."""
        c_idxs = list(range(len(weak_set)))
        self.weak_set_size = len(c_idxs)
        few_shot_data, few_shot_data_list = self.FewShotDataList(valid_set)
        self.weak_set_weights = torch.zeros(len(weak_set), device=self.device)
        best_weights = self.weak_set_weights.clone()
        self.weight_grads = torch.zeros(len(weak_set), device=self.device)
        best_acc = 0.0
        # Doubled so a slice running past len(c_idxs) still fills a batch.
        shuffled_indices = random.sample(c_idxs,
                                         len(c_idxs)) * 2
        # Clone: state_dict() returns references to the live parameters.
        init_state_dicts = model.state_dict()
        init_state_dicts = OrderedDict((key, init_state_dicts[key].clone()) for key in init_state_dicts)
        for epoch in range(max_meta_step):
            u = self.weak_set_weights.softmax(dim=0)  # normalised instance weights
            model.zero_grad()
            for i in trange(0, len(c_idxs), self.batch_size):
                indices = c_idxs[i:min(len(c_idxs), i+self.batch_size)]
                batch, indices = self.InnerBatch(weak_set, indices, device=self.device)
                loss = self.LossList(model, batch)
                sum_loss = (u[indices]*loss).sum()
                sum_loss.backward()
            model_optim.step()

            f_loss, f_acc = self.ClipValGradientV2(model, few_shot_data, few_shot_data_list, topk_ratio=GSNR_topK)
            if f_acc > best_acc:
                best_weights = self.weak_set_weights.clone()
                best_acc = f_acc
            model.load_state_dict(init_state_dicts)
            for step in trange(0, len(c_idxs), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.InnerBatch(weak_set, indices, device=self.device)
                grads = self.ComputeGrads4Weights(model, batch, few_shot_data, few_shot_data_list)
                self.weight_grads[indices] = -1*self.lr4model*grads
            # Chain rule through the softmax: J = diag(u) - u u^T.
            self.weight_grads = torch.matmul(
                    self.weight_grads.unsqueeze(0),
                    torch.matmul(u.unsqueeze(1),-1*u.unsqueeze(0)) + \
                    torch.eye(len(u), device=u.device)*u.unsqueeze(1)
            ).squeeze(0)
            print("u:", u)
            print("self.weights:", self.weak_set_weights)
            print("self.weight_grads:", self.weight_grads)
            # SGD step on the L2-normalised gradient of the weight logits.
            # NOTE(review): norm(2) of an all-zero gradient yields NaN here.
            self.weight_grads = self.weight_grads / self.weight_grads.norm(2)
            update = weight_eta*self.weight_grads
            self.weak_set_weights = self.weak_set_weights - update
        self.weak_set_weights = best_weights

class MetaEvaluatorV2(InstanceReweightingV2):
    """Meta-evaluator that owns its model and datasets (unlike MetaEvaluatorV3,
    which receives them per call) and learns per-instance weights for the
    weak set against a few-shot validation set."""

    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                 weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                 batch_size=5):
        # BUGFIX: the default was a literal [] (a single list shared by every
        # instance constructed without exp_idxs); use None and build fresh.
        exp_idxs = [] if exp_idxs is None else exp_idxs
        super(MetaEvaluatorV2, self).__init__(model, weak_set, few_shot_set, weak_set_label, exp_idxs, weak_set_weights,
                                            convey_fn, lr4model, coeff4expandset, max_few_shot_size, batch_size)

    def weightsLR(self, eta=0.01, grad_weights=None):
        """Scale-invariant step size: eta divided by the mean |gradient|."""
        return eta / grad_weights.abs().mean()

    def InnerBatch(self, indices=None, batch_size=-1, dataset=None, device=None):
        """Collate a batch from ``dataset`` (or ``self.weak_set`` when None),
        sampling ``batch_size`` random indices when none are given.

        Returns (batch, indices as an int64 tensor on ``device``).
        """
        batch_size = self.batch_size if batch_size == -1 else batch_size
        if dataset is None:
            if indices is None:
                indices = random.sample(list(range(len(self.weak_set))), batch_size)
            items = [self.weak_set[idx] for idx in indices]
            batch = self.weak_set.collate_raw_batch(items)
        else:
            if indices is None:
                indices = random.sample(list(range(len(dataset))), batch_size)
            items = [dataset[idx] for idx in indices]
            batch = dataset.collate_raw_batch(items)
        device = self.device if device is None else device
        return batch, torch.tensor(indices, dtype=torch.int64, device=device)

    def FewShotDataList(self):
        """Return (single_batch, None) when the few-shot set fits within
        ``max_few_shot_size``, otherwise (None, list_of_batches)."""
        if len(self.few_shot_set) > self.max_few_shot_size:
            few_shot_data = None
            few_shot_data_list = [self.few_shot_set.collate_raw_batch(
                                            [self.few_shot_set[j] for j in range(i,
                                                                                 min(i+self.max_few_shot_size,
                                                                                     len(self.few_shot_set)))])
                                            for i in range(0, len(self.few_shot_set), self.max_few_shot_size)]
        else:
            few_shot_data = self.few_shot_set.collate_raw_batch(
                [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
            )
            few_shot_data_list = None
        return few_shot_data, few_shot_data_list

    def ExpandValidIdxs(self, indices, valid_idxs, expand_idxs=None, pop_ratio=0.5):
        """Append the ``pop_ratio`` fraction of this batch with the highest
        instance weights to ``valid_idxs``; ``expand_idxs`` passes through.

        ``expand_idxs`` defaults to None instead of a mutable [].
        """
        if expand_idxs is None:
            expand_idxs = []
        popOUT_v = int(self.batch_size * pop_ratio)
        # Ascending sort: the last popOUT_v positions hold the largest weights.
        _, idxs = self.weak_set_weights[indices].sort()
        valid_idxs.extend(indices[idxs[-popOUT_v:]].tolist())
        return expand_idxs, valid_idxs

    def Evaluate(self, weight_eta, max_meta_step=5, GSNR_topK=0.5):
        """Learn ``self.weak_set_weights``; the weights from the epoch with
        the best few-shot accuracy are kept.

        Per epoch: (1) one weighted pass over the weak set updating the model,
        (2) GSNR-clip the validation gradient, (3) finite-difference gradients
        of the instance weights mapped through the softmax Jacobian, then an
        SGD step on the weight logits.
        """
        c_idxs = list(range(len(self.weak_set)))
        self.weak_set_size = len(c_idxs)
        few_shot_data, few_shot_data_list = self.FewShotDataList()
        self.weak_set_weights = torch.zeros(len(self.weak_set), device=self.device)
        best_weights = self.weak_set_weights.clone()
        self.weight_grads = torch.zeros(len(self.weak_set), device=self.device)
        best_acc = 0.0
        # Doubled so a slice running past len(c_idxs) still fills a batch.
        shuffled_indices = random.sample(c_idxs,
                                         len(c_idxs)) * 2
        # Clone: state_dict() returns references to the live parameters.
        init_state_dicts = self.model.state_dict()
        init_state_dicts = OrderedDict((key, init_state_dicts[key].clone()) for key in init_state_dicts)
        for epoch in range(max_meta_step):
            u = self.weak_set_weights.softmax(dim=0)  # normalised instance weights
            self.model.zero_grad()
            for i in trange(0, len(c_idxs), self.batch_size):
                indices = c_idxs[i:min(len(c_idxs), i+self.batch_size)]
                batch, indices = self.InnerBatch(indices, device=self.device)
                loss = self.LossList(batch)
                sum_loss = (u[indices]*loss).sum()
                sum_loss.backward()
            self.model_optim.step()
            f_loss, f_acc = self.ClipValGradientV2(few_shot_data, few_shot_data_list, topk_ratio=GSNR_topK)
            if f_acc > best_acc:
                best_weights = self.weak_set_weights.clone()
                best_acc = f_acc

            self.model.load_state_dict(init_state_dicts)
            for step in trange(0, len(c_idxs), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.InnerBatch(indices, device=self.device)
                grads = self.ComputeGrads4Weights(batch, few_shot_data, few_shot_data_list)
                self.weight_grads[indices] = -1*self.lr4model*grads
            # Chain rule through the softmax: J = diag(u) - u u^T.
            self.weight_grads = torch.matmul(
                    self.weight_grads.unsqueeze(0),
                    torch.matmul(u.unsqueeze(1),-1*u.unsqueeze(0)) + \
                    torch.eye(len(u), device=u.device)*u.unsqueeze(1)
            ).squeeze(0)
            print("u:", u)
            print("self.weights:", self.weak_set_weights)
            print("self.weight_grads:", self.weight_grads)
            # SGD step on the L2-normalised gradient of the weight logits.
            # NOTE(review): norm(2) of an all-zero gradient yields NaN here.
            self.weight_grads = self.weight_grads / self.weight_grads.norm(2)
            update = weight_eta*self.weight_grads
            self.weak_set_weights = self.weak_set_weights - update
        self.weak_set_weights = best_weights
        # return valid_idxs

class MetaEvaluator(InstanceReweighting):
    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                 weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=20,
                 batch_size=5):
        """Thin constructor; all state handling lives in InstanceReweighting.

        ``exp_idxs`` defaults to None rather than a literal [] so a fresh list
        is created per instance (a mutable default would be shared by every
        MetaEvaluator constructed without this argument).
        """
        exp_idxs = [] if exp_idxs is None else exp_idxs
        super(MetaEvaluator, self).__init__(model, weak_set, few_shot_set, weak_set_label, exp_idxs,
                                            weak_set_weights, convey_fn, lr4model, scale_lr4model,
                                            coeff4expandset, max_few_shot_size, batch_size)

    def weightsGrad(self, step, batch, tmp_model, few_shot_data=None, few_shot_data_list=None, return_loss=False):
        """Gradient of the few-shot (validation) loss w.r.t. the per-example
        weights of ``batch``, accumulated over all few-shot batches and, when
        present, over ``self.expand_data_list``.

        Returns grad_weights, or (grad_weights, few_shot_loss) if return_loss.
        """
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            weights = torch.zeros(self.batch_size, device=tmp_model.device)
            grad_weights, few_loss = self.ComputeGrads4Weights(step, batch, weights, tmp_model, few_shot_data)
            few_shot_loss = few_loss.data
        else:
            print("-------> few shot data list ------>")
            grad_weights_list, loss_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                weights = torch.zeros(self.batch_size, device=tmp_model.device)
                weights_grad, few_loss = self.ComputeGrads4Weights(step + i, batch, weights, tmp_model, few_data)
                grad_weights_list.append(weights_grad)
                loss_list.append(few_loss.data.cpu())
                torch.cuda.empty_cache()
                # Rewind tmp_model before the next sub-batch (ComputeGrads4Weights
                # appears to modify its parameters -- TODO confirm in parent class).
                tmp_model.load_state_dict(self.model.state_dict())
            grad_weights = torch.stack(grad_weights_list).sum(dim=0)
            few_shot_loss = np.mean(loss_list)
            print('===> Mean Few Shot Loss %3d | %3d ####, loss = %6.8f' % (
                step, self.weak_set_size, few_shot_loss
            ))

        if self.expand_data_list is not None:
            print("-------> expand data list ------>")
            grad_weights_list, loss_list = [], []
            init_state_dicts = tmp_model.state_dict()
            for i, few_data in enumerate(self.expand_data_list):
                weights = torch.zeros(self.batch_size, device=tmp_model.device)
                weights_grad, few_loss = self.ComputeGrads4Weights(step + i, batch, weights, tmp_model, few_data)
                grad_weights_list.append(weights_grad)
                loss_list.append(few_loss.data.cpu())
                tmp_model.load_state_dict(init_state_dicts)
                torch.cuda.empty_cache()
            e_grad_weights = torch.stack(grad_weights_list).sum(dim=0)
            grad_weights = grad_weights + e_grad_weights
            # NOTE(review): the expand-set mean loss is added on top of the
            # few-shot loss, so the combined value is a sum of means, not a mean.
            few_shot_loss = few_shot_loss + np.mean(loss_list)
            print('===> Mean Expand Loss %3d | %3d ####, loss = %6.8f' % (
                step, self.weak_set_size, few_shot_loss
            ))
        torch.cuda.empty_cache()
        if return_loss:
            return grad_weights, few_shot_loss
        else:
            return grad_weights

    def OptimizeWeights(self, step, batch, indices, lr4weights, few_shot_data=None, few_shot_data_list=None,
                        device: torch.device = torch.device("cuda:0"), tmp_model=None, max_meta_steps=100):
        """Adam-optimise the per-example weights of ``batch`` (positions given
        by ``indices``) against the few-shot loss, stopping early once the
        loss stops improving; results are written back to
        ``self.weak_set_weights``.

        NOTE(review): the ``lr4weights`` argument is never used in this body --
        the update uses ``self.lr4weights``, lazily set from the first
        gradient's median magnitude. Confirm whether the argument should win.
        """
        assert few_shot_data_list is not None or few_shot_data is not None
        all_weights = self.weak_set_weights.to(device)
        indices = indices.to(device)
        if tmp_model is None:
            # Scratch copy so the meta inner loop never touches self.model.
            tmp_model = copy.deepcopy(self.model)
            tmp_model.set_device(torch.device("cuda:1"))
        tmp_model.load_state_dict(self.model.state_dict())
        # ==============Adam to accelerate the convergence=============
        beta1, beta2, eps = 0.9, 0.999, 1e-6
        v = torch.zeros(self.batch_size).to(tmp_model.device)  # first-moment estimate
        s = torch.zeros(self.batch_size).to(tmp_model.device)  # second-moment estimate
        # =============================================================
        print("=====> init acc:",
              WeightedAcc(self.weak_set_label.to(tmp_model.device)[indices],
                          torch.tensor(self.weak_set.label, device=tmp_model.device)[indices],
                          all_weights[indices]
                          ))
        print("=====> init weights:", all_weights[indices])
        previous_loss = 1e8
        for meta_step in range(max_meta_steps):
            grad_weights, few_loss = self.weightsGrad(step, batch, tmp_model, few_shot_data,
                                            few_shot_data_list, return_loss=True)
            # Lazily pick a step size from the first observed gradient scale.
            if not hasattr(self, "lr4weights"):
                self.lr4weights = 0.01/grad_weights.abs().median()

            # Early stop as soon as the few-shot loss increases.
            if few_loss > previous_loss:
                break
            else:
                previous_loss = few_loss
            #==============Update process in Adam=======================
            v = beta1 * v + (1 - beta1) * grad_weights
            s = beta2 * s + (1 - beta2) * (grad_weights.pow(2))
            v_bias_corr = v / (1 - beta1 ** (meta_step + 1))
            s_bias_corr = s / (1 - beta2 ** (meta_step + 1))
            update = self.lr4weights * v_bias_corr / (s_bias_corr.sqrt() + eps)

            all_weights[indices] -= update
            # =============================================================
            # Rewind tmp_model, then take one weighted model step so the next
            # weightsGrad sees the effect of the new weights.
            tmp_model.load_state_dict(self.model.state_dict())
            model_grads = self.ModelGrads(batch, all_weights[indices].to(tmp_model.device),
                                          tmp_model, create_graph=False)
            update_params(tmp_model, self.lr4model * self.scale_lr4model, source_params=model_grads)
            torch.cuda.empty_cache()
        print("=====> Optimized acc:",
              WeightedAcc(self.weak_set_label.to(tmp_model.device)[indices],
                          torch.tensor(self.weak_set.label, device=tmp_model.device)[indices],
                          all_weights[indices]
                          ))
        print("=====> Optimized weights:", all_weights[indices])
        self.weak_set_weights[indices] = all_weights[indices].cpu()

    def seqOptimizeWeights(self, step, batch, indices, lr4weights, few_shot_data=None, few_shot_data_list=None,
                        device: torch.device = torch.device("cuda:0"), tmp_model=None, max_meta_steps=100):
        """Sequential variant of OptimizeWeights: one Adam update of the
        per-example weights per few-shot sub-batch (instead of one summed
        gradient per meta step); no early stopping.

        NOTE(review): ComputeGrads4Weights is used here as a single return
        value, whereas weightsGrad above unpacks two values from it --
        confirm the parent method's signature/overload.
        """
        assert few_shot_data_list is not None or few_shot_data is not None
        all_weights = self.weak_set_weights.to(device)
        indices = indices.to(device)
        if tmp_model is None:
            # Scratch copy so the meta inner loop never touches self.model.
            tmp_model = copy.deepcopy(self.model)
            tmp_model.set_device(torch.device("cuda:1"))
        tmp_model.load_state_dict(self.model.state_dict())
        # ==============Adam to accelerate the convergence=============
        beta1, beta2, eps = 0.9, 0.999, 1e-6
        v = torch.zeros(self.batch_size).to(tmp_model.device)  # first-moment estimate
        s = torch.zeros(self.batch_size).to(tmp_model.device)  # second-moment estimate
        # =============================================================
        print("=====> init acc:",
              WeightedAcc(self.weak_set_label.to(tmp_model.device)[indices],
                          torch.tensor(self.weak_set.label, device=tmp_model.device)[indices],
                          all_weights[indices]
                          ))
        print("=====> init weights:", all_weights[indices])
        for meta_step in range(max_meta_steps):
            # Treat a single few-shot batch as a one-element list.
            meta_eval_data = [few_shot_data] if few_shot_data is not None \
                                                    else few_shot_data_list
            for idx, subset in enumerate(meta_eval_data):
                weights = torch.zeros(self.batch_size, device=tmp_model.device)
                grad_weights = self.ComputeGrads4Weights(step+idx, batch, weights, tmp_model, subset)
                # Normalise so the Adam step size is scale-independent.
                grad_weights = grad_weights / grad_weights.norm()
                #             ==============Update process in Adam=======================
                v = beta1 * v + (1 - beta1) * grad_weights
                s = beta2 * s + (1 - beta2) * (grad_weights.pow(2))
                v_bias_corr = v / (1 - beta1 ** (meta_step + 1))
                s_bias_corr = s / (1 - beta2 ** (meta_step + 1))
                update = lr4weights * v_bias_corr / (s_bias_corr.sqrt() + eps)
                #             update = lr4weights*grad_weights
                all_weights[indices] -= update
                # =============================================================
                # Rewind tmp_model, then take one weighted model step under
                # the updated weights before the next sub-batch.
                tmp_model.load_state_dict(self.model.state_dict())
                model_grads = self.ModelGrads(batch, all_weights[indices].to(tmp_model.device),
                                              tmp_model, create_graph=False)
                update_params(tmp_model, self.lr4model * self.scale_lr4model, source_params=model_grads)
                torch.cuda.empty_cache()
        print("=====> Optimized acc:", WeightedAcc(self.weak_set_label.to(tmp_model.device)[indices],
                                                  torch.tensor(self.weak_set.label, device=tmp_model.device)[indices],
                                                  all_weights[indices]
                                                  ))
        print("=====> Optimized weights:", all_weights[indices])
        self.weak_set_weights[indices] = all_weights[indices].cpu()


    def Evaluate(self, max_epochs=10, max_meta_steps=10, lr4weights=1.0):
        """Optimise instance weights over the whole weak set for
        ``max_epochs`` and return indices selected for the expand set
        (top-weighted items, class-balanced when both weak labels appear in
        a batch)."""
        expand_idxs = []
        # Keep the scratch model on a second GPU when one is available.
        tmp_model_device = torch.device("cuda:1") if torch.cuda.device_count() >= 2 \
            else torch.device("cuda:0")
        # One collated batch when the few-shot set fits, else a list of chunks.
        if len(self.few_shot_set) > self.max_few_shot_size:
            few_shot_data = None
            few_shot_data_list = [self.few_shot_set.collate_raw_batch(
                                            [self.few_shot_set[j] for j in range(i,
                                                                                 min(i+self.max_few_shot_size,
                                                                                     len(self.few_shot_set)))])
                                            for i in range(0, len(self.few_shot_set), self.max_few_shot_size)]
        else:
            few_shot_data = self.few_shot_set.collate_raw_batch(
                [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
            )
            few_shot_data_list = None
        self.weak_set_weights = torch.zeros(len(self.weak_set))
        weak_labels = self.weak_set.labelTensor()
        for epoch in range(max_epochs):
            # Doubled so a slice running past the end still fills a batch.
            shuffled_indices = random.sample(list(range(len(self.weak_set))),
                                             len(self.weak_set)) * 2
            # NOTE(review): reassigning lr4weights compounds the decay
            # (after epoch e the factor is 0.5**(0+1+...+e), floored at 0.1);
            # presumably lr0 * 0.5**epoch was intended -- confirm.
            lr4weights = max(lr4weights * (0.5 ** epoch), 0.1)
            for step in range(0, len(self.weak_set), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.SampleBatch(indices)
                self.OptimizeWeights(step, batch, indices, lr4weights, few_shot_data, few_shot_data_list,
                                     tmp_model_device, max_meta_steps=max_meta_steps)
                pos_weak_labels = weak_labels[indices].__eq__(1)
                neg_weak_labels = weak_labels[indices].__eq__(0)
                # If a batch is single-class, take the overall top items;
                # otherwise take top items per class for balance.
                # NOTE(review): batch_size // 10 (and // 20) is 0 for small
                # batch sizes, so topk(0) would select nothing -- confirm
                # batch_size is large enough in practice.
                if pos_weak_labels.sum() == 0 or neg_weak_labels.sum() == 0:
                    _, idxs = self.weak_set_weights[indices].topk(self.batch_size // 10)
                    expand_idxs.extend(indices[idxs].tolist())
                else:
                    _, pos_idxs = self.weak_set_weights[indices][pos_weak_labels].topk(self.batch_size // 20)
                    _, neg_idxs = self.weak_set_weights[indices][neg_weak_labels].topk(self.batch_size // 20)
                    expand_idxs.extend(indices[pos_weak_labels][pos_idxs].tolist())
                    expand_idxs.extend(indices[neg_weak_labels][neg_idxs].tolist())
            # self.weak_set_weights = self.weak_set_weights / self.weak_set_weights.abs().sum()
        return expand_idxs

    def ValidIndicesOut(self, max_epochs=10, max_meta_steps=10, lr4weights=1.0, pseaudo_idxs=None):
        """Three-class variant of Evaluate: optimise instance weights over the
        candidate subset of the weak set (excluding ``pseaudo_idxs`` and any
        previously expanded indices) and return (expand_idxs, valid_idxs),
        both chosen from the top-weighted items, per weak label 0/1/2 when
        all three are present in a batch."""
        expand_idxs = []
        valid_idxs = []
        # Candidate pool: everything not already pseudo-labelled or expanded.
        c_idxs = set(range(len(self.weak_set))) if pseaudo_idxs is None else \
            set(range(len(self.weak_set))) - set(pseaudo_idxs)
        c_idxs = list(c_idxs) if len(self.expand_idxs) == 0 else \
            list(c_idxs - set(self.expand_idxs))
        self.weak_set_size = len(c_idxs)
        # Keep the scratch model on a second GPU when one is available.
        tmp_model_device = torch.device("cuda:1") if torch.cuda.device_count() >= 2 \
            else torch.device("cuda:0")
        # One collated batch when the few-shot set fits, else a list of chunks.
        if len(self.few_shot_set) > self.max_few_shot_size:
            few_shot_data = None
            few_shot_data_list = [self.few_shot_set.collate_raw_batch(
                                            [self.few_shot_set[j] for j in range(i,
                                                                                 min(i+self.max_few_shot_size,
                                                                                     len(self.few_shot_set)))])
                                            for i in range(0, len(self.few_shot_set), self.max_few_shot_size)]
        else:
            few_shot_data = self.few_shot_set.collate_raw_batch(
                [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
            )
            few_shot_data_list = None

        self.weak_set_weights = torch.zeros(len(self.weak_set))
        weak_labels = self.weak_set.labelTensor()
        for epoch in range(max_epochs):
            # Doubled so a slice running past the end still fills a batch.
            shuffled_indices = random.sample(c_idxs,
                                             len(c_idxs)) * 2
            # NOTE(review): reassigning lr4weights compounds the decay
            # (0.5**(0+1+...+epoch), floored at 0.1) -- same pattern as
            # Evaluate above; confirm intent.
            lr4weights = max(lr4weights * (0.5 ** epoch), 0.1)
            for step in range(0, len(c_idxs), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.SampleBatch(indices)
                self.OptimizeWeights(step, batch, indices, lr4weights, few_shot_data, few_shot_data_list,
                                     tmp_model_device, max_meta_steps=max_meta_steps)
                weak_labels_2 = weak_labels[indices].__eq__(2)
                weak_labels_1 = weak_labels[indices].__eq__(1)
                weak_labels_0 = weak_labels[indices].__eq__(0)
                # NOTE(review): -self.batch_size // 10 parses as
                # (-batch_size) // 10 (floor division), e.g. -5 // 10 == -1,
                # i.e. one item -- not -(batch_size // 10) == 0; confirm
                # which slice length was intended.
                if weak_labels_0.sum() == 0 or weak_labels_1.sum() == 0 or weak_labels_2.sum()==0:
                    _, idxs = self.weak_set_weights[indices].sort()
                    expand_idxs.extend(indices[idxs[-self.batch_size // 10:]].tolist())
                    valid_idxs.extend(indices[idxs[-self.batch_size // 6:]].tolist())
                else:
                    # Per-class ascending sort; take the highest-weighted tail
                    # of each class for balance.
                    _, idxs_0 = self.weak_set_weights[indices][weak_labels_0].sort()
                    _, idxs_1 = self.weak_set_weights[indices][weak_labels_1].sort()
                    _, idxs_2 = self.weak_set_weights[indices][weak_labels_2].sort()
                    expand_idxs.extend(indices[weak_labels_0][idxs_0[-self.batch_size // 20:]].tolist())
                    expand_idxs.extend(indices[weak_labels_1][idxs_1[-self.batch_size // 20:]].tolist())
                    expand_idxs.extend(indices[weak_labels_2][idxs_2[-self.batch_size // 20:]].tolist())
                    valid_idxs.extend(indices[weak_labels_0][idxs_0[-self.batch_size // 6:]].tolist())
                    valid_idxs.extend(indices[weak_labels_1][idxs_1[-self.batch_size // 6:]].tolist())
                    valid_idxs.extend(indices[weak_labels_2][idxs_2[-self.batch_size // 6:]].tolist())
            # L1-normalise the weights at the end of each epoch.
            self.weak_set_weights = self.weak_set_weights / self.weak_set_weights.abs().sum()
        return expand_idxs, valid_idxs

    def HalfOut(self, max_epochs=10, max_meta_steps=10, lr4weights=1.0, pseaudo_idxs=None):
        """Meta-learn per-example weights over the weak set and pop out the
        top-weighted half of each batch.

        Per batch, up to ``batch_size // 2`` positively-weighted examples are
        collected into ``valid_idxs`` and the top third of those into
        ``expand_idxs``.

        Args:
            max_epochs: number of passes over the candidate indices.
            max_meta_steps: forwarded to ``OptimizeWeights``.
            lr4weights: initial learning rate for the example weights; decayed
                each epoch but clamped at 0.1.
            pseaudo_idxs: optional weak-set indices to exclude from the
                candidate pool (sic: "pseudo").

        Returns:
            Tuple ``(expand_idxs, valid_idxs)`` of weak-set index lists
            (may contain duplicates across epochs).
        """
        expand_idxs = []
        valid_idxs = []
        # Candidate pool: all weak-set indices, minus pseudo-labeled ones and
        # minus indices already expanded in earlier rounds.
        c_idxs = set(range(len(self.weak_set))) if pseaudo_idxs is None else \
            set(range(len(self.weak_set))) - set(pseaudo_idxs)
        c_idxs = list(c_idxs) if len(self.expand_idxs) == 0 else \
            list(c_idxs - set(self.expand_idxs))
        self.weak_set_size = len(c_idxs)
        # Run the temporary meta-model on a second GPU when one exists.
        tmp_model_device = torch.device("cuda:1") if torch.cuda.device_count() >= 2 \
            else torch.device("cuda:0")
        if len(self.few_shot_set) > self.max_few_shot_size:
            # Few-shot set too large for a single batch: pre-collate it in
            # chunks of at most max_few_shot_size examples.
            few_shot_data = None
            few_shot_data_list = [
                self.few_shot_set.collate_raw_batch(
                    [self.few_shot_set[j]
                     for j in range(i, min(i + self.max_few_shot_size, len(self.few_shot_set)))])
                for i in range(0, len(self.few_shot_set), self.max_few_shot_size)
            ]
        else:
            # Small enough to collate the whole few-shot set at once.
            few_shot_data = self.few_shot_set.collate_raw_batch(
                [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
            )
            few_shot_data_list = None

        self.weak_set_weights = torch.zeros(len(self.weak_set))
        for epoch in range(max_epochs):
            # Doubled so the final slice can run past len(c_idxs) and still
            # yield a full batch.
            shuffled_indices = random.sample(c_idxs, len(c_idxs)) * 2
            # NOTE(review): the decay compounds — the already-decayed value is
            # multiplied by 0.5 ** epoch again each epoch — confirm intended.
            lr4weights = max(lr4weights * (0.5 ** epoch), 0.1)
            for step in range(0, len(c_idxs), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.SampleBatch(indices)
                self.OptimizeWeights(step, batch, indices, lr4weights, few_shot_data, few_shot_data_list,
                                     tmp_model_device, max_meta_steps=max_meta_steps)
                # Count of positively-weighted examples in this batch; .item()
                # keeps the slice bounds below as plain Python ints instead of
                # 0-dim tensors.
                valid_cnt = int(self.weak_set_weights[indices].__gt__(0).int().sum().item())
                popOUT_v = min(valid_cnt, self.batch_size // 2)
                popOUT_e = popOUT_v // 3
                # Ascending sort: the tail of `idxs` holds the largest weights.
                _, idxs = self.weak_set_weights[indices].sort()
                # BUGFIX: skip empty selections — `idxs[-0:]` slices the WHOLE
                # batch, so a zero count used to pop out every example instead
                # of none.
                if popOUT_e > 0:
                    expand_idxs.extend(indices[idxs[-popOUT_e:]].tolist())
                if popOUT_v > 0:
                    valid_idxs.extend(indices[idxs[-popOUT_v:]].tolist())
                print("expand_idxs:", expand_idxs)
                print("valid_idxs:", valid_idxs)
            # Renormalize once per epoch; guard against an all-zero weight
            # vector, which previously produced NaNs (0 / 0).
            weight_norm = self.weak_set_weights.abs().sum()
            if weight_norm > 0:
                self.weak_set_weights = self.weak_set_weights / weight_norm
        return expand_idxs, valid_idxs

    def PopOut(self, max_epochs=10, max_meta_steps=10, lr4weights=1.0, pseaudo_idxs=None,
                    pop_ratio=0.5, few_shot_data=None, few_shot_data_list=None):
        """Meta-learn per-example weights over the weak set and pop out the
        top ``pop_ratio`` fraction of each batch.

        Args:
            max_epochs: number of passes over the candidate indices.
            max_meta_steps: forwarded to ``OptimizeWeights``.
            lr4weights: initial learning rate for the example weights; decayed
                each epoch but clamped at 0.1.
            pseaudo_idxs: optional weak-set indices to exclude from the
                candidate pool (sic: "pseudo").
            pop_ratio: fraction of each batch to collect (top-weighted first).
            few_shot_data: pre-collated few-shot batch; built from
                ``self.few_shot_set`` when both this and ``few_shot_data_list``
                are None.
            few_shot_data_list: pre-collated few-shot chunks (used instead of
                ``few_shot_data`` when the few-shot set is large).

        Returns:
            List ``valid_idxs`` of selected weak-set indices (may contain
            duplicates across epochs).
        """
        valid_idxs = []
        # Candidate pool: all weak-set indices, minus pseudo-labeled ones and
        # minus indices already expanded in earlier rounds.
        c_idxs = set(range(len(self.weak_set))) if pseaudo_idxs is None else \
            set(range(len(self.weak_set))) - set(pseaudo_idxs)
        c_idxs = list(c_idxs) if len(self.expand_idxs) == 0 else \
            list(c_idxs - set(self.expand_idxs))
        self.weak_set_size = len(c_idxs)
        # Run the temporary meta-model on a second GPU when one exists.
        tmp_model_device = torch.device("cuda:1") if torch.cuda.device_count() >= 2 \
            else torch.device("cuda:0")
        if few_shot_data is None and few_shot_data_list is None:
            # Caller did not supply few-shot batches: build them here.
            if len(self.few_shot_set) > self.max_few_shot_size:
                # Too large for a single batch: pre-collate in chunks of at
                # most max_few_shot_size examples.
                few_shot_data = None
                few_shot_data_list = [
                    self.few_shot_set.collate_raw_batch(
                        [self.few_shot_set[j]
                         for j in range(i, min(i + self.max_few_shot_size, len(self.few_shot_set)))])
                    for i in range(0, len(self.few_shot_set), self.max_few_shot_size)
                ]
            else:
                # Small enough to collate the whole few-shot set at once.
                few_shot_data = self.few_shot_set.collate_raw_batch(
                    [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
                )
                few_shot_data_list = None

        self.weak_set_weights = torch.zeros(len(self.weak_set))
        # Per-batch pop-out count is loop-invariant: compute it once.
        popOUT_v = int(self.batch_size * pop_ratio)
        for epoch in range(max_epochs):
            # Doubled so the final slice can run past len(c_idxs) and still
            # yield a full batch.
            shuffled_indices = random.sample(c_idxs, len(c_idxs)) * 2
            # NOTE(review): the decay compounds — the already-decayed value is
            # multiplied by 0.5 ** epoch again each epoch — confirm intended.
            lr4weights = max(lr4weights * (0.5 ** epoch), 0.1)
            for step in range(0, len(c_idxs), self.batch_size):
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.SampleBatch(indices)
                self.OptimizeWeights(step, batch, indices, lr4weights, few_shot_data, few_shot_data_list,
                                     tmp_model_device, max_meta_steps=max_meta_steps)
                # Ascending sort: the tail of `idxs` holds the largest weights.
                _, idxs = self.weak_set_weights[indices].sort()
                # BUGFIX: skip the selection when the count rounds down to
                # zero — `idxs[-0:]` slices the WHOLE batch, so a tiny
                # pop_ratio used to pop out every example instead of none.
                if popOUT_v > 0:
                    valid_idxs.extend(indices[idxs[-popOUT_v:]].tolist())
                print("valid_idxs:", valid_idxs)
            # Renormalize once per epoch; guard against an all-zero weight
            # vector, which previously produced NaNs (0 / 0).
            weight_norm = self.weak_set_weights.abs().sum()
            if weight_norm > 0:
                self.weak_set_weights = self.weak_set_weights / weight_norm
        return valid_idxs