import sys, os
sys.path.append("../../")
sys.path.append("../")
from Dataloader.weiboloader import BiGCNWeiboSet
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from RumdetecFramework.BaseRumorFramework import RumorDetection
from  RumdetecFramework.InstanceReweighting import InstanceReweightingV4
import random, numpy as np
from tqdm import trange
import torch, fitlog
import torch.nn.functional as F

class GradientReversal(torch.autograd.Function):
    """Identity in the forward pass, sign-flipped (scaled) gradient in the
    backward pass — the standard building block for adversarial / domain-
    confusion training.

    The scale is read from the class attribute ``lambd`` at backward time,
    so it can be tuned globally without rebuilding the graph.
    """
    lambd = 1.0

    @staticmethod
    def forward(ctx, x):
        # Pass the input through untouched; nothing needs saving for backward.
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # Flip the gradient's sign and scale by the (mutable) class-level lambd.
        return -GradientReversal.lambd * grad_output

def Generator1(pseudo_target, labeled_source, labeled_target, batchSize):
    """Yield mixed mini-batches drawn from three pools.

    Each yield is ``(collated_batch, global_indices)`` where the batch is laid
    out as [labeled-source | pseudo-target | labeled-target] and
    ``global_indices`` gives, in the *same order as the batch samples*, each
    sample's position in the global weight vector (pseudo-target segment
    first, then labeled-source, then labeled-target — matching the ``weights``
    layout built in ``Training``).
    """
    print("Generator1")
    # Lazily attach valid_indexs so plain datasets can be used as-is.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    if not hasattr(labeled_source, 'valid_indexs'):
        labeled_source.valid_indexs = list(range(len(labeled_source)))
    if not hasattr(labeled_target, 'valid_indexs'):
        labeled_target.valid_indexs = list(range(len(labeled_target)))
    # Segment bases into the global weight vector [PT | LS | LT].
    # FIX: the labeled-target base must skip BOTH preceding segments; it
    # previously used len(pseudo_target) + len(labeled_target).
    LS_base = len(pseudo_target)
    LT_base = len(pseudo_target) + len(labeled_source)
    # Doubled shuffled index lists let a slice run past one epoch's end.
    idxsLS = random.sample(labeled_source.valid_indexs, len(labeled_source.valid_indexs)) * 2
    idxsPT = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    # Reserve up to 5 slots per batch for the (typically tiny) labeled-target pool.
    bs_LT = min(5, len(labeled_target.valid_indexs))
    for i in range(0, len(pseudo_target.valid_indexs), batchSize // 2):
        # i can exceed len(labeled_source); wrap with the remainder.
        start_LS, start_PT = i % len(labeled_source.valid_indexs), i
        end_LS, end_PT = start_LS + batchSize // 2, (start_PT + batchSize // 2) - bs_LT
        items1 = [labeled_source[jj] for jj in idxsLS[start_LS:end_LS]]
        items2 = [pseudo_target[jj] for jj in idxsPT[start_PT:end_PT]]
        # FIX: sample *indices* from valid_indexs — the original sampled the
        # dataset's items and then used them as indices.
        idxs_LT = random.sample(labeled_target.valid_indexs, bs_LT)
        items3 = [labeled_target[jj] for jj in idxs_LT]
        # FIX: emit indices in the same order as the collated samples
        # (labeled-source, pseudo-target, labeled-target) so per-sample weights
        # line up with per-sample losses downstream.
        global_idxs = [idx + LS_base for idx in idxsLS[start_LS:end_LS]] \
                      + idxsPT[start_PT:end_PT] \
                      + [idx + LT_base for idx in idxs_LT]
        yield labeled_source.collate_raw_batch(items1 + items2 + items3), global_idxs

def Generator2(pseudo_target, labeled_source, batchSize):
    """Yield half/half batches of labeled-source plus pseudo-target samples.

    Each yield is ``(collated_batch, global_indices)``; the batch is laid out
    as [labeled-source | pseudo-target] and the indices follow the same order,
    offset into the global weight vector (pseudo-target segment first, then
    labeled-source).
    """
    print("Generator2")
    # Lazily attach valid_indexs so plain datasets can be used as-is.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    if not hasattr(labeled_source, 'valid_indexs'):
        labeled_source.valid_indexs = list(range(len(labeled_source)))
    LS_base = len(pseudo_target)
    # Doubled shuffled index lists let a slice run past one epoch's end.
    idxsLS = random.sample(labeled_source.valid_indexs, len(labeled_source.valid_indexs)) * 2
    idxsPT = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    for i in range(0, len(pseudo_target.valid_indexs), batchSize // 2):
        # i can exceed len(labeled_source); wrap with the remainder.
        start_LS, start_PT = i % len(labeled_source.valid_indexs), i
        end_LS, end_PT = start_LS + batchSize // 2, start_PT + batchSize // 2
        items1 = [labeled_source[jj] for jj in idxsLS[start_LS:end_LS]]
        items2 = [pseudo_target[jj] for jj in idxsPT[start_PT:end_PT]]
        # FIX: indices are emitted in batch order (labeled-source first, then
        # pseudo-target) so per-sample weights align with per-sample losses;
        # the original yielded the pseudo-target indices first.
        yield labeled_source.collate_raw_batch(items1 + items2), \
              [idx + LS_base for idx in idxsLS[start_LS:end_LS]] + idxsPT[start_PT:end_PT]

def Generator3(pseudo_target, batchSize):
    """Yield ``(collated_batch, index_list)`` batches drawn solely from the
    pseudo-labeled target set, in a freshly shuffled order."""
    print("Generator3")
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    # Shuffle once, then double the list so a slice may run past one epoch's end.
    shuffled = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    for start in range(0, len(pseudo_target.valid_indexs), batchSize):
        chunk = shuffled[start:start + batchSize]
        samples = [pseudo_target[j] for j in chunk]
        yield pseudo_target.collate_raw_batch(samples), chunk

def DataIter(unlabeled_target, labeled_source=None, labeled_target=None, batch_size=32):
    """Dispatch to the richest generator the supplied pools allow: three-way
    mixing, source+target mixing, or target-only batches."""
    if labeled_target is not None:
        assert len(labeled_target) > 0
        return Generator1(unlabeled_target, labeled_source, labeled_target, batch_size)
    if labeled_source is not None:
        return Generator2(unlabeled_target, labeled_source, batch_size)
    return Generator3(unlabeled_target, batch_size)

class MSTDataset(BiGCNWeiboSet):
    """BiGCN Weibo dataset with mutable labels, so pseudo-labels produced
    during self-training can be written back onto the set."""

    def __init__(self, batchsize=20):
        super(MSTDataset, self).__init__(batchsize)

    @property
    def label(self):
        """Labels as a numpy array (converted lazily from list on first use)."""
        if isinstance(self.data_y, list):
            self.data_y = np.array(self.data_y)
        return self.data_y

    def setLabel(self, label, idxs):
        """Overwrite the labels at positions ``idxs`` with ``label``."""
        if isinstance(self.data_y, list):
            self.data_y = np.array(self.data_y)
        self.data_y[idxs] = label

    def labelTensor(self, device=None):
        """Labels as a float32 tensor, optionally placed on ``device``."""
        return torch.tensor(self.data_y, dtype=torch.float32, device=device)

    def collate_raw_batch(self, batch):
        """Collate raw items into (seqs, TD_graphs, BU_graphs, labels, topic_labels)."""
        seqs, td_graphs, bu_graphs, ys, topics = [], [], [], [], []
        for sample in batch:
            seqs.append(sample[0])
            td_graphs.append(sample[1])
            bu_graphs.append(sample[2])
            ys.append(sample[3])
            topics.append(sample[4])
        return seqs, td_graphs, bu_graphs, torch.tensor(ys), torch.tensor(topics)

class SelfTrainningInterface:
    """Shared self-training utilities: optimizer construction, metric helpers,
    (optionally augmented) prediction, whole-dataset inference and
    pseudo-label annotation.

    NOTE(review): the "Trainning" spelling is kept because external code may
    reference this class by name.
    """
    def __init__(self):
        pass

    def obtainOptim(self, model, learning_rate):
        """Build an SGD optimizer with layer-wise learning-rate decay.

        Parameters whose name contains "layer.<k>." get lr * 0.8**(12-k);
        embedding parameters get lr * 0.8**13; everything else the base lr.
        NOTE(review): assumes a 12-layer BERT-style parameter naming scheme
        ("...layer.<idx>....") — confirm for the models actually used here.
        """
        optimizerGroupedParameters = [
                                         {'params': p,
                                          'lr': learning_rate * pow(0.8, 12 - int(
                                              n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                                              else (learning_rate * pow(0.8, 13) if "embedding" in n else learning_rate)
                                          # layer-wise fine-tuning
                                          } for n, p in model.named_parameters()
                                     ]
        optim = torch.optim.SGD(optimizerGroupedParameters)
        return optim

    def acc_P_R_F1(self, y_true, y_pred):
        """Return (accuracy, (precision, recall, f1, support)), computed on CPU."""
        return accuracy_score(y_true.cpu(), y_pred.cpu()), \
                    precision_recall_fscore_support(y_true.cpu(), y_pred.cpu())

    def augPredict(self, model, batch):
        """Predict with a randomly chosen input augmentation.

        With probability 0.2 each: gaussian noise, gaussian blur, adversarial,
        random masking, or random token replacement.  The adversarial branch
        first backpropagates a clean (un-augmented) loss — presumably so input
        gradients are available to the "adver" augmentation inside
        model.sent2vec; confirm against its implementation.
        """
        assert hasattr(self, 'isAug') and self.isAug
        rand = random.random()
        if rand < 0.2:
            model.sent2vec.set_aug_type("gaussian")
        elif rand < 0.4:
            model.sent2vec.set_aug_type("g_blur")
        elif rand < 0.6:
            # Temporarily disable augmentation to obtain a clean loss/gradients.
            self.isAug = False
            model.sent2vec.set_aug_type(None)
            loss, acc = self.lossAndAcc(model, batch)
            loss.backward()
            model.sent2vec.set_aug_type("adver")
            self.isAug = True
        elif rand < 0.8:
            model.sent2vec.set_aug_type("rMask")
        else:
            model.sent2vec.set_aug_type("rReplace")
        preds = model.AugPredict(batch)
        return preds

    def predict(self, model, batch):
        """Route to augPredict when augmentation is enabled, else the model's
        plain predict."""
        if hasattr(self, 'isAug'):
            if self.isAug:
                return self.augPredict(model, batch)
        return model.predict(batch)

    def dataset_logits(self, model: RumorDetection, data, idxs=None, batch_size=40):
        """Run the model over ``data`` (optionally restricted to ``idxs``) in
        mini-batches and return the concatenated prediction tensor."""
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.predict(batch)
            preds.append(pred)
        pred_tensor = torch.cat(preds)
        return pred_tensor

    def dataset_inference(self, model:RumorDetection, data, idxs=None, batch_size=20):
        """Return (predicted_class, predicted_score) per sample — the last
        column after an ascending sort, i.e. the per-row maximum."""
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, idxs = pred_tensor.sort(dim=1)
        return idxs[:, -1], vals[:, -1]

    def perf(self, model:RumorDetection, data, label, idxs=None, batch_size=20):
        """Evaluate ``model`` on ``data`` against ``label`` under no_grad and
        return the acc_P_R_F1 tuple."""
        with torch.no_grad():
            y_pred, _ = self.dataset_inference(model, data, idxs=idxs, batch_size=batch_size)
        y_true = label[idxs] if idxs is not None else label
        return self.acc_P_R_F1(y_true, y_pred)

    def annotate(self, model: RumorDetection, data, pseaudo_idxs=[], batch_size=20):
        """Hard-pseudo-label every sample not listed in ``pseaudo_idxs`` and
        cache the raw predictions on ``data.logits``.

        NOTE(review): the mutable default ``pseaudo_idxs=[]`` is only read,
        never mutated, so it is harmless here — but a None default would be
        safer if this is ever changed.
        """
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            preds = pred_tensor.argmax(dim=1)
            weakLabel = preds.cpu().numpy()
            data.setLabel(weakLabel, c_idxs)
            if not hasattr(data, "logits"):
                data.logits = torch.zeros([len(data), pred_tensor.size(1)], device=pred_tensor.device)
            data.logits[c_idxs] = pred_tensor

class MetaSelfTrainingV3(InstanceReweightingV4, SelfTrainningInterface):
    """Meta self-training: before each real optimizer step, the per-sample
    weights of the current batch are tuned (through a sigmoid) so that a trial
    update minimizes the loss on a small meta-validation ("few-shot") set."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainingV3, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size, Inner_BatchSize)
        self.log_dir = log_dir
        if not os.path.exists(log_dir):
            # FIX: os.makedirs replaces os.system("mkdir ...") — portable,
            # shell-free, and creates intermediate directories instead of
            # failing silently.
            os.makedirs(log_dir, exist_ok=True)
        fitlog.set_log_dir("{}/".format(log_dir), new_log=True)
        self.suffix = suffix
        self.weight_eta = weight_eta    # step size of the meta weight update
        self.meta_step = meta_step      # max inner meta iterations per batch
        self.extra_step = extra_step    # patience after the best meta loss
        self.best_valid_acc = 0.0
        self.minMetaLoss = 1e8          # best meta-validation loss seen so far
        self.self_train_loop_cnt, self.counter, self.valid_counter = 0, 0, 0

    def DataIter(self, unlabeled_target, labeled_source=None, labeled_target=None, batch_size=32):
        """Overridable hook that delegates to the module-level DataIter."""
        return DataIter(unlabeled_target, labeled_source, labeled_target, batch_size)

    def valid(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on ``test_set`` and push accuracy plus per-class P/R/F1 to fitlog."""
        rst_model = self.perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        print("self train loop {}| step = {} : ".format(self.self_train_loop_cnt, self.valid_counter), rst_model)
        output_items = [("valid_acc", acc_v)] + \
                       [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_metric({f"{test_suffix}": dict(output_items)}, step=self.valid_counter)
        self.valid_counter += 1

    def MetaValidation(self, model:RumorDetection, optim:torch.optim, batch,
                    u:torch.Tensor, few_data=None, few_data_list=None):
        """Apply one weighted trial update with weights ``u``, then measure
        loss/accuracy (and cache gradients) on the few-shot set."""
        model.zero_grad()
        loss = self.LossList(model, batch)
        sumLoss = (u * loss).sum()
        sumLoss.backward()
        optim.step()
        self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model, few_shot_data=few_data,
                                                                     few_shot_data_list=few_data_list)
        return fewLoss, fewAcc

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor, few_data=None, few_data_list=None):
        """Gradient-search the batch's raw weights for up to ``meta_step``
        trial updates, keeping the weights with the lowest meta-validation
        loss.  Returns (iterations_run, best_weights_or_None).  The model is
        restored to its initial parameters before returning.
        """
        if (few_data is None) and (few_data_list is None):
            assert hasattr(self, "few_shot_data")
            assert hasattr(self, "few_shot_data_list")
            few_data, few_data_list = self.few_shot_data, self.few_shot_data_list

        self.epsilon = 1e-5
        # Snapshot parameters so every trial update starts from the same point.
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        weight_best = None
        optim_step = self.meta_step
        step = -1  # FIX: keep `step` defined even when meta_step == 0
        for step in range(self.meta_step):
            u = weight.sigmoid()
            fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  \
                        loss/acc = {fewLoss}/{fewAcc}, PreFewLoss={self.minMetaLoss}")

            if fewLoss < self.minMetaLoss:
                self.minMetaLoss = fewLoss
                weight_best = weight.clone()
                if optim_step == self.meta_step:
                    optim_step = step  # remember when the best loss appeared

            if step == 0:
                fitlog.add_metric({"meta_valid_loss" : fewLoss}, step=self.counter)
                fitlog.add_metric({"meta_valid_acc" : fewAcc}, step=self.counter)
                self.counter += 1

            # Stop after `extra_step` fruitless iterations or at the limit.
            if (step == optim_step + self.extra_step) or ((step+1)==self.meta_step):
                break

            # Undo the trial update, then take one normalized descent step on
            # the raw weights, chained through the sigmoid.
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            w_grads = u_grads*u*(1-u)  # chain rule through the sigmoid
            weightGrads = -1*w_grads / (w_grads.norm(2) + 1e-8)
            update = self.weight_eta * weightGrads
            weight = weight - update

        # Evaluate the final weights once more before restoring the model.
        u = weight.sigmoid()
        fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
        if fewLoss < self.minMetaLoss:
            self.minMetaLoss = fewLoss
            weight_best = weight.clone()
        model.load_state_dict(initStateDicts)
        return step+1, weight_best

    def OptimStep(self, model, model_optim, batch, weight):
        """One real optimizer step using the (sigmoid-squashed) sample weights."""
        loss = self.LossList(model, batch)
        sumLoss = ((weight.sigmoid()) * loss).sum()
        model_optim.zero_grad()
        sumLoss.backward()
        model_optim.step()

    def Training(self, model:RumorDetection, unlabeled_target:MSTDataset, valid_set:MSTDataset, UT_Label,
                 labeled_source:MSTDataset=None, labeled_target:MSTDataset=None,
                 max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """Main loop: pseudo-label the target set, then alternate meta weight
        search and weighted optimization over mixed batches, validating every
        ``max_valid_every`` steps."""
        self.self_train_loop_cnt += 1
        meta_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        model_optim = torch.optim.Adam(model.parameters(), lr=self.lr4model)
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)

        # Global weight vector layout: [pseudo-target | labeled-source |
        # labeled-target]; labeled-target samples start strongly trusted
        # (10.0 pre-sigmoid).
        weights = [0.0]*len(unlabeled_target) + \
                    ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                        ([] if labeled_target is None else [10.0]*len(labeled_target))

        self.train_set_weights = torch.tensor(weights, device=self.device)
        step = 0
        self.annotate(model, unlabeled_target)
        for epoch in range(max_epoch):
            for batch, indices in self.DataIter(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                weights = self.train_set_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, meta_optim, batch, weights)

                if new_weights is not None:
                    self.train_set_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                else:
                    # No improving weights found: take a plain unweighted step.
                    loss, acc = self.lossAndAcc(model, batch)
                    # FIX: clear gradients left over from the meta search
                    # before accumulating this batch's gradients.
                    model_optim.zero_grad()
                    loss.backward()
                    model_optim.step()

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                step += 1

class MetaSelfTrainingV5(MetaSelfTrainingV3):
    """V3 with chunked few-shot handling: a large meta-validation set is split
    into collated chunks of at most ``max_few_shot_size`` samples."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainingV5, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model, coeff4expandset,
                                                 max_few_shot_size, Inner_BatchSize, meta_step, extra_step)

    def FewShotDataList(self, few_shot_set):
        """Prepare the meta-validation data.

        Stores class-balancing weights (1 / (class_num * class_count)) into
        self.f_label_weight, then returns (single_batch, None) when the whole
        set fits in one chunk, or (None, list_of_chunks) otherwise.
        """
        targets = few_shot_set.labelTensor()
        if targets.dim() == 2:
            targets = targets.argmax(dim=1)
        per_class = [int((targets == c).sum().item()) for c in range(self.class_num)]
        assert sum(per_class) == len(few_shot_set)
        # NOTE(review): a class absent from the few-shot set (count 0) would
        # divide by zero here — confirm callers guarantee full class coverage.
        self.f_label_weight = torch.tensor([1.0 / (self.class_num * c) for c in per_class],
                                           device=self.device)

        total = len(few_shot_set)
        if total <= self.max_few_shot_size:
            whole = few_shot_set.collate_raw_batch([few_shot_set[i] for i in range(total)])
            return whole, None
        size = self.max_few_shot_size
        chunks = [few_shot_set.collate_raw_batch([few_shot_set[j]
                                                  for j in range(lo, min(lo + size, total))])
                  for lo in range(0, total, size)]
        return None, chunks

class MetaSelfTrainingV7(MetaSelfTrainingV5):
    """V5 plus: a low-entropy "expand set" of confident pseudo-labeled target
    samples joins the meta-validation objective, and training is made
    reproducible via explicit RNG seeding."""

    def __init__(self, seed, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainingV7, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model, coeff4expandset,
                                                 max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        self.seed = seed
        # FIX: guarantee the coefficient exists under its canonical name.  The
        # original meanGradOnValSet referenced a misspelled
        # `self.coefff4expandset` (three f's), which raises AttributeError
        # unless the parent happened to define that exact typo — confirm
        # against InstanceReweightingV4 if in doubt.
        self.coeff4expandset = coeff4expandset

    def ConstructExpandData(self, weak_set, expand_idxs):
        """Collate the confident samples at ``expand_idxs`` into chunked expand
        batches and remove them from the set's sampling pool (valid_indexs)."""
        self.e_label_weight = torch.tensor([1.0 / len(expand_idxs) for _ in range(self.class_num)], device=self.device)
        # extract out the expand data
        print("- - - - - update expand data - - ->")
        self.expand_data_list = [weak_set.collate_raw_batch(
            [weak_set[idx] for idx in expand_idxs[i:min(i + self.max_few_shot_size, len(expand_idxs))]]
        ) for i in range(0, len(expand_idxs), self.max_few_shot_size)]

        # Exclude expanded samples from future batch sampling
        # (set membership for O(1) lookups instead of list scans).
        expand = set(expand_idxs)
        if not hasattr(weak_set, "valid_indexs"):
            weak_set.valid_indexs = [idx for idx in range(len(weak_set)) if idx not in expand]
        else:
            weak_set.valid_indexs = [idx for idx in weak_set.valid_indexs if idx not in expand]
        # The meta-validation objective changed, so reset the global best loss.
        self.minMetaLoss = 1e8

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Accumulate (into .grad) the meta-validation gradient over the
        few-shot set plus, when present, the confident expand set.

        Returns (grad_dicts, total_loss, few_shot_acc).
        """
        model.zero_grad()
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            loss, acc = self.lossAndAcc(model, few_shot_data, label_weight=self.f_label_weight, reduction="sum")
            loss.backward()
            loss = loss.data.item()
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                # Summed, class-balanced losses let the mean risk be computed
                # over an arbitrarily sized few-shot set chunk by chunk.
                f_loss, f_acc = self.lossAndAcc(model, few_data, label_weight=self.f_label_weight, reduction="sum")
                f_loss.backward()
                f_loss_list.append(f_loss.data.item())
                f_acc_list.append(f_acc)
                torch.cuda.empty_cache()
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)

        # FIX: tolerate the expand list not being constructed yet (the original
        # dereferenced self.expand_data_list unconditionally) and use the
        # correctly spelled coefficient attribute (was `coefff4expandset`).
        expand_list = getattr(self, "expand_data_list", None)
        if expand_list:
            assert hasattr(self, 'e_label_weight')
            print("-------> expand data list ------>")
            e_loss_list, e_acc_list = [], []
            for i, e_data in enumerate(expand_list):
                e_loss, e_acc = self.lossAndAcc(model, e_data, label_weight=self.e_label_weight, reduction="sum")
                (e_loss * self.coeff4expandset).backward()
                e_loss_list.append(e_loss.data.item())
                e_acc_list.append(e_acc)
                torch.cuda.empty_cache()
            exp_loss, exp_acc = np.sum(e_loss_list), np.mean(e_acc_list)
            print(f"##Perf on Meta Expand Set##  : exp_loss/exp_acc = {exp_loss}/{exp_acc}")
            grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
            return grad_dicts, \
                   loss + self.coeff4expandset*exp_loss, \
                   acc
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

    def entrophy_select(self, unlabeled_target:MSTDataset, topk=100):
        """Return indices of the ``topk`` lowest-entropy (most confident)
        target samples, from the cached prediction distributions.

        NOTE(review): assumes unlabeled_target.logits rows are probability
        distributions; an exact zero entry yields 0 * log(0) = nan — confirm
        predict() output is strictly positive.
        """
        assert hasattr(unlabeled_target, "logits")
        entrophy = (-1 * unlabeled_target.logits * (unlabeled_target.logits.log())).sum(dim=1)
        topK_idxs = entrophy.argsort()[ 0 : topk ].tolist()
        return topK_idxs

    def initTrainingEnv(self):
        """Seed every RNG and force deterministic cuDNN for reproducibility."""
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed_all(self.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        self.self_train_loop_cnt += 1

    def Training(self, model:RumorDetection, unlabeled_target:MSTDataset, valid_set:MSTDataset, UT_Label,
                 labeled_source:MSTDataset=None, labeled_target:MSTDataset=None,
                 max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """Seeded training loop: pseudo-label the target set, carve out a
        confident expand set, then alternate meta weight search and weighted
        optimization."""
        self.initTrainingEnv()
        meta_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        model_optim = torch.optim.Adam(model.parameters(), lr=self.lr4model)

        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        if not hasattr(self, "raw_weights"):
            # [pseudo-target | labeled-source | labeled-target]; labeled-target
            # samples start strongly trusted (10.0 pre-sigmoid).
            weights = [0.0]*len(unlabeled_target) + \
                        ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                            ([] if labeled_target is None else [10.0]*len(labeled_target))
            self.raw_weights = torch.tensor(weights, device=self.device)

        if not hasattr(self, "abs_weights"):
            self.abs_weights = self.raw_weights.clone()

        step = 0
        self.annotate(model, unlabeled_target)
        expand_idxs_Cur = self.entrophy_select(unlabeled_target)
        self.ConstructExpandData(unlabeled_target, expand_idxs_Cur)
        for epoch in range(max_epoch):
            for batch, indices in self.DataIter(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                weights = self.abs_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, meta_optim, batch, weights)
                if new_weights is not None:
                    self.abs_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                else:
                    loss, acc = self.lossAndAcc(model, batch)
                    # FIX: clear stale gradients from the meta search before
                    # the fallback unweighted step.
                    model_optim.zero_grad()
                    loss.backward()
                    model_optim.step()

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                else:
                    print(f"step = {step}")
                step += 1

class MetaSelfTrainV8(MetaSelfTrainingV7):
    """V7 with soft pseudo-labels: predictions are normalized by a per-class
    top-K score and sharpened with temperature 1/alpha before being written
    back as label distributions."""

    def __init__(self, seed, alpha, topk, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainV8, self).__init__(seed, class_num, log_dir, suffix, weight_eta, lr4model,
                                coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        self.alpha = alpha  # sharpening temperature (smaller => harder labels)
        self.topK = topk    # fraction of samples defining the per-class scale

    def annotate(self, model: RumorDetection, data:MSTDataset, pseaudo_idxs=[], batch_size=20):
        """Write sharpened soft pseudo-labels onto every sample not listed in
        ``pseaudo_idxs`` and cache the raw predictions in ``data.logits``."""
        remaining = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            scores = self.dataset_logits(model, data, idxs=remaining, batch_size=batch_size)
            if not hasattr(data, "logits"):
                data.logits = torch.zeros([len(data), self.class_num], device=scores.device)
            data.logits[remaining] = scores

        k = int(self.topK * len(scores))
        top_vals, _ = scores.topk(k, dim=0)
        # Per-class normalizer: the K-th largest predicted score of each class.
        self.lambda_k = top_vals[-1]
        sharpened = (scores / self.lambda_k).pow(1.0 / self.alpha)
        soft_labels = (sharpened / (sharpened.sum(dim=1).unsqueeze(-1))).cpu().numpy()
        data.setLabel(soft_labels, remaining)

    def LossList(self, model, batch):
        """Per-sample soft cross-entropy between the model's predictions and
        the (possibly soft) labels in ``batch[-2]``; returns a 1-D tensor."""
        probs = model.predict(batch)
        # ------------------------------------------------------------#
        # Nudge probabilities away from exact 0/1 so log() stays finite.
        eps = torch.full_like(probs, 1e-8)
        safe_probs = (probs - eps).abs()
        targets = batch[-2].to(safe_probs.device)
        return -(safe_probs.log() * targets).sum(dim=1)

class MetaSelfTrainV9(MetaSelfTrainV8):
    """V8 with a loss/accuracy helper that supports both hard (1-D) and soft
    (2-D) label batches."""

    def __init__(self, seed, alpha, topk, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainV9, self).__init__(seed, alpha, topk, class_num, log_dir, suffix, weight_eta, lr4model,
                 coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step, extra_step)

    def lossAndAcc(self, model, batch, label_weight=None, reduction=None):
        """Compute (loss, accuracy) for one batch.

        Hard labels (dim 1) use NLL on log-probabilities; soft labels (dim 2)
        use soft cross-entropy — a batch mean, or a weighted sum when
        ``label_weight`` is given.

        Raises:
            ValueError: if the label tensor is neither 1-D nor 2-D (the
                original silently fell through and then crashed with an
                undefined ``loss``).
        """
        preds = model.predict(batch)
        # Nudge probabilities away from exact 0/1 so log() stays finite.
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2]
        if labels.dim() == 1:
            # FIX: F.nll_loss requires a string reduction; map the historical
            # default of None onto "mean" instead of crashing.  Callers that
            # pass reduction="sum" are unaffected.
            loss = F.nll_loss(preds.log(), labels.to(preds.device), weight=label_weight,
                              reduction=reduction if reduction is not None else "mean")
        elif labels.dim() == 2:
            if label_weight is None:
                loss = (-1.0 *
                        (preds.log()) *
                        labels.to(preds.device)
                        ).sum(dim=1).mean()
            else:
                w_labels = (labels.to(preds.device)) * label_weight
                loss = (-1.0 *
                        (preds.log()) *
                         w_labels
                        ).sum(dim=1).sum()
            # Reduce soft labels to hard classes for the accuracy computation.
            labels = labels.argmax(dim=1)
        else:
            # FIX: fail loudly instead of the original bare `pass`, which left
            # `loss` undefined and raised a confusing NameError below.
            raise ValueError(f"labels must be 1-D or 2-D, got dim={labels.dim()}")
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

class MetaSelfTrainV11(MetaSelfTrainV9):
    """V9 variant that (a) uses raw per-sample weights without a sigmoid
    squash, (b) assigns per-module learning rates, (c) rebuilds the
    meta-validation and expand batches on the fly each step, and (d) tracks a
    sliding window of the last 10 meta-validation losses instead of a single
    scalar."""
    def __init__(self, seed, alpha, topk, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainV11, self).__init__(seed, alpha, topk, class_num, log_dir, suffix, weight_eta, lr4model,
                 coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        # Shadows the ancestors' scalar minMetaLoss with a bounded list — a
        # window of at most 10 recent losses, maintained in MetaStep below.
        self.minMetaLoss = []

    def obtainOptim(self, model:RumorDetection, learning_rate):
        """Build SGD with per-module learning rates (encoder 1%, propagation
        10%, classifier 100% of self.lr4model).

        NOTE(review): the ``learning_rate`` argument is ignored; all rates are
        derived from self.lr4model.
        """
        optimizerGroupedParameters = [
            {'params': model.sent2vec.parameters(), 'lr': self.lr4model*0.01},
            {'params': model.prop_model.parameters(), 'lr': self.lr4model*0.1},
            {'params': model.rdm_cls.parameters(), 'lr': self.lr4model}
        ]
        optim = torch.optim.SGD(optimizerGroupedParameters)
        return optim

    def Batch2ExpandData(self, f_batch=None, e_batch=None):
        """Install a per-step meta-validation batch (``f_batch``) and/or a
        single-chunk expand list (``e_batch``) for meanGradOnValSet to use."""
        # extract out the expand data
        if f_batch is not None:
            self.few_shot_data = f_batch
        if e_batch is not None:
            print("- - - - - update expand data - - ->")
            # Uniform per-class weight derived from the expand batch size.
            self.e_label_weight = torch.tensor(
                [1.0 / len(e_batch[-2]) for _ in range(self.class_num)],
                device=self.device
            )
            self.expand_data_list = [e_batch]

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor, few_data=None, few_data_list=None):
        """Search raw per-sample weights for up to ``meta_step`` trial updates.

        Unlike V3, the weights are used directly (no sigmoid) and the
        acceptance baseline is the mean of the last 10 meta losses once the
        window is full.  Returns (iterations_run, best_weights_or_None); the
        model is restored to its initial parameters before returning.
        """
        if (few_data is None) and (few_data_list is None):
            assert hasattr(self, "few_shot_data")
            assert hasattr(self, "few_shot_data_list")
            few_data, few_data_list = self.few_shot_data, self.few_shot_data_list

        self.epsilon = 1e-5
        # Snapshot parameters so each trial update starts from the same point.
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        weight_best = None
        # Baseline: -1 is a sentinel until the loss window holds 10 entries.
        minMetaLoss = -1 if len(self.minMetaLoss) < 10 else np.mean(self.minMetaLoss)
        PreMetaLoss = -1
        optim_step = self.meta_step
        for step in range(self.meta_step):
            u = weight#.sigmoid()
            fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
            if step == 0:
                PreMetaLoss = fewLoss
                if minMetaLoss==-1:
                    minMetaLoss = fewLoss

            if fewLoss < minMetaLoss:
                minMetaLoss = PreMetaLoss = fewLoss
                # If the PreMetaLoss is not updated, then the weight_best might be updated after the few loss is worser
                weight_best = weight.clone()
                if optim_step == self.meta_step:
                    optim_step = step
            elif fewLoss < PreMetaLoss:
                PreMetaLoss = fewLoss
                weight_best = weight.clone()

            # NOTE(review): np.mean([]) is nan (with a RuntimeWarning) until
            # the first MetaStep call has appended to the window — the printout
            # below reflects that on the very first batch.
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  \
                        loss/acc = {fewLoss}/{fewAcc}, PreFewLoss={np.mean(self.minMetaLoss)}")
            if step == 0:
                fitlog.add_metric({"meta_valid_loss" : fewLoss}, step=self.counter)
                fitlog.add_metric({"meta_valid_acc" : fewAcc}, step=self.counter)
                self.counter += 1

            # Stop after `extra_step` fruitless iterations or at the limit.
            if (step == optim_step + self.extra_step) or ((step+1)==self.meta_step):
                break

            # Undo the trial update and take one normalized descent step
            # directly on the raw weights (no sigmoid chain rule here).
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            # print("u_grads : ", u_grads)
            w_grads = u_grads#*u*(1-u)
            weightGrads = -1*w_grads / (w_grads.norm(2) + 1e-8)
            # weightGrads = -1* w_grads#.sign()
            update = self.weight_eta * weightGrads
            weight = weight - update#*(weight_mask.to(update.device))

        # Evaluate the final weights once more before restoring the model.
        u = weight#.sigmoid()
        fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
        if fewLoss < minMetaLoss:
            minMetaLoss = PreMetaLoss = fewLoss
            weight_best = weight.clone()
        elif fewLoss < PreMetaLoss:
            PreMetaLoss = fewLoss
            weight_best = weight.clone()

        model.load_state_dict(initStateDicts)
        if minMetaLoss < PreMetaLoss:
        # the MinMetaLoss is not updated, so its value is the last mean value of self.minMetaLoss
            self.minMetaLoss.append(PreMetaLoss)
        else:
            self.minMetaLoss.append(minMetaLoss)
        # Keep only the 10 most recent losses in the window.
        if len(self.minMetaLoss) > 10:
            self.minMetaLoss.pop(0)
        return step+1, weight_best

    def Training(self, model:RumorDetection, unlabeled_target:MSTDataset, valid_set:MSTDataset, UT_Label,
                 labeled_source:MSTDataset=None, labeled_target:MSTDataset=None,
                 max_epoch=100, max_valid_every=100, few_update_every=10,
                 model_file="./tmp.pkl"):
        """Training loop with per-step re-sampled meta-validation batches.

        NOTE(review): the loop below unpacks THREE values per iteration, but
        the module-level DataIter generators all yield 2-tuples
        (batch, indices) — as written this raises ValueError on the first
        batch.  An iterator that additionally yields an expand batch
        (``e_batch``) appears to be intended; confirm against the DataIter
        version this class was written for.
        NOTE(review): ``few_update_every`` is accepted but never used.
        """
        self.initTrainingEnv()
        meta_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        model_optim = self.obtainOptim(model, self.lr4model)
        # Only the side effect (self.f_label_weight) is kept; the per-step
        # few-shot batch is re-sampled inside the loop instead.
        _, _ = self.FewShotDataList(valid_set)
        self.few_shot_data, self.few_shot_data_list = None, None

        if not hasattr(self, "raw_weights"):
            # [pseudo-target | labeled-source | labeled-target] weight layout.
            weights = [0.0]*len(unlabeled_target) + \
                        ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                            ([] if labeled_target is None else [10.0]*len(labeled_target))
            self.raw_weights = torch.tensor(weights, device=self.device)

        step = 0
        self.annotate(model, unlabeled_target)
        for epoch in range(max_epoch):
            for batch, indices, e_batch in DataIter(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                # Fresh random meta-validation batch for this step.
                f_batch = valid_set.collate_raw_batch(
                    [valid_set[kk] for kk in random.sample(range(len(valid_set)), self.max_few_shot_size)]
                )
                self.Batch2ExpandData(f_batch, e_batch)
                weights = self.raw_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, meta_optim, batch, weights)
                if new_weights is not None:
                    self.raw_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                else:
                    loss, acc = self.lossAndAcc(model, batch)
                    loss.backward()
                    model_optim.step()

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                else:
                    print(f"step = {step}")
                step += 1

