import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Lemma_Factory
from Dataloader.twitterloader import  BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import InstanceReweightingV3
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score,precision_recall_fscore_support
import torch, torch.nn as nn, torch.nn.functional as F
import pickle, random, os, fitlog
# from multiprocessing import Queue
from queue import Queue
import threading
from concurrent.futures import ProcessPoolExecutor
from tqdm import trange
import math
import multiprocessing
import numpy as np

class SimpleLoader:
    """Batch iterator over the concatenation of a pseudo-labelled target set
    and (optionally) a labelled source set and a labelled target set.

    Global indices are laid out as:
        [0, pt_len)                    -> pseudo_target
        [pt_len, pt_len + ls_len)      -> labeled_source
        [pt_len + ls_len, data_size)   -> labeled_target
    Indices missing from a dataset's ``valid_indexs`` are skipped, so every
    yielded batch contains exactly ``batch_size`` valid items.
    """

    def __init__(self, pseudo_target, labeled_source=None, labeled_target=None,
                 batch_size=32):
        self.initDataIterConfig(pseudo_target, labeled_source, labeled_target)
        self.batch_size = batch_size
        # A shuffled permutation of all global indices, doubled: `next` may
        # need to look past `data_size` positions to fill the final batch when
        # some indices are filtered out by `valid_indexs`.
        self.idxs = random.sample(range(self.data_size), self.data_size) * 2
        self.start_i = 0

    def initDataIterConfig(self, pseudo_target, labeled_source=None, labeled_target=None):
        """Record the datasets and their sizes; ensure each one carries a
        ``valid_indexs`` list (defaults to every index)."""
        if not hasattr(pseudo_target, 'valid_indexs'):
            pseudo_target.valid_indexs = list(range(len(pseudo_target)))
        if labeled_source is not None and not hasattr(labeled_source, 'valid_indexs'):
            labeled_source.valid_indexs = list(range(len(labeled_source)))
        if labeled_target is not None and not hasattr(labeled_target, 'valid_indexs'):
            labeled_target.valid_indexs = list(range(len(labeled_target)))
        self.pseudo_target, self.labeled_source, self.labeled_target = \
            pseudo_target, labeled_source, labeled_target
        self.pt_len = len(pseudo_target)
        self.ls_len = 0 if labeled_source is None else len(labeled_source)
        self.lt_len = 0 if labeled_target is None else len(labeled_target)
        self.data_size = self.pt_len + self.ls_len + self.lt_len

    def _read_valid(self, dataset, idx):
        """Return ``dataset[idx]`` if ``idx`` is currently valid, else None."""
        return dataset[idx] if idx in dataset.valid_indexs else None

    def read_pseudo_set(self, idx):
        return self._read_valid(self.pseudo_target, idx)

    def read_labeled_target(self, idx):
        return self._read_valid(self.labeled_target, idx)

    def read_labeled_source(self, idx):
        return self._read_valid(self.labeled_source, idx)

    def read(self, idx):
        """Resolve a global index to ``(idx, item)``; None if filtered out."""
        if idx < self.pt_len:
            item = self.read_pseudo_set(idx)
        elif self.ls_len != 0 and idx < (self.ls_len + self.pt_len):
            item = self.read_labeled_source(idx - self.pt_len)
        else:
            item = self.read_labeled_target(idx - self.ls_len - self.pt_len)
        return None if item is None else (idx, item)

    def next(self):
        """Collect the next full batch.

        Returns:
            (collated_batch, global_indices) — the batch is collated with the
            pseudo-target set's ``collate_raw_batch``.

        Raises:
            IndexError: if even the doubled index buffer is exhausted before a
            full batch is assembled (debug state is printed first).
        """
        items, indices = [], []
        while True:
            try:
                rst = self.read(self.idxs[self.start_i])
            except IndexError:  # ran past the (doubled) index buffer
                print("start_i : ", self.start_i)
                print("self.idxs : ", self.idxs)
                raise

            self.start_i += 1
            if rst is not None:
                indices.append(rst[0])
                items.append(rst[1])
            if len(items) == self.batch_size:
                return self.pseudo_target.collate_raw_batch(items), indices

    def __iter__(self):
        # One pass over the (virtual) concatenated dataset.
        while self.start_i < self.data_size:
            yield self.next()

class SelfTrainningInterface:
    """Mixin providing prediction, evaluation and pseudo-labelling utilities
    shared by the meta self-training trainers below."""

    def __init__(self):
        pass

    def obtainOptim(self, tr_model, learning_rate):
        """Build an Adam optimizer over all parameters of ``tr_model``."""
        return torch.optim.Adam([
            {'params': tr_model.parameters(), 'lr': learning_rate}
        ])

    def acc_P_R_F1(self, y_true, y_pred):
        """Return ``(accuracy, (precision, recall, f1, support))``.

        ``y_pred`` is moved to CPU once (instead of once per metric) so the
        sklearn metrics can consume it.
        """
        y_pred_cpu = y_pred.cpu()
        return accuracy_score(y_true, y_pred_cpu), \
                    precision_recall_fscore_support(y_true, y_pred_cpu)

    def augPredict(self, model, batch):
        """Predict under one randomly drawn input augmentation.

        The augmentation is chosen uniformly among: gaussian noise, gaussian
        blur, adversarial, random masking and random replacement.
        """
        assert hasattr(self, 'isAug') and self.isAug
        rand = random.random()
        if rand < 0.2:
            model.sent2vec.set_aug_type("gaussian")
        elif rand < 0.4:
            model.sent2vec.set_aug_type("g_blur")
        elif rand < 0.6:
            # Adversarial augmentation needs gradients from a clean pass first;
            # temporarily disable augmentation while back-propagating that loss.
            self.isAug = False
            model.sent2vec.set_aug_type(None)
            loss, acc = self.lossAndAcc(model, batch)
            loss.backward()
            model.sent2vec.set_aug_type("adver")
            self.isAug = True
        elif rand < 0.8:
            model.sent2vec.set_aug_type("rMask")
        else:
            model.sent2vec.set_aug_type("rReplace")
        preds = model.AugPredict(batch)
        return preds

    def predict(self, model, batch):
        """Dispatch to the augmented predictor when self-augmentation is on."""
        if hasattr(self, 'isAug') and self.isAug:
            return self.augPredict(model, batch)
        return model.predict(batch)

    def dataset_logits(self, model: "RumorDetection", data, idxs=None, batch_size=40):
        """Run ``model`` over ``data`` (optionally restricted to ``idxs``) in
        mini-batches and return the concatenated output tensor."""
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.forward(batch)
            preds.append(pred)
        return torch.cat(preds)

    def dataset_inference(self, model: "RumorDetection", data, idxs=None, batch_size=20):
        """Return per-example ``(argmax class, max score)`` over ``data``."""
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, sort_idxs = pred_tensor.sort(dim=1)
        return sort_idxs[:, -1], vals[:, -1]

    def perf(self, model: "RumorDetection", data, label, idxs=None, batch_size=20):
        """Evaluate ``model`` on ``data`` against ``label`` without gradients."""
        with torch.no_grad():
            y_pred, _ = self.dataset_inference(model, data, idxs=idxs, batch_size=batch_size)
        y_true = label[idxs] if idxs is not None else label
        return self.acc_P_R_F1(y_true, y_pred)

    def annotate(self, model: "RumorDetection", data, pseaudo_idxs=None, batch_size=20):
        """Pseudo-label every example of ``data`` except those in ``pseaudo_idxs``.

        Stores the raw model outputs on ``data.logits`` and writes hard labels
        (each output dimension thresholded at 0.5) into ``data.data_y``.
        """
        if pseaudo_idxs is None:  # avoid the mutable-default-argument pitfall
            pseaudo_idxs = []
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            if not hasattr(data, "logits"):
                data.logits = torch.zeros([len(data), pred_tensor.size(1)], device=pred_tensor.device)
            data.logits[c_idxs] = pred_tensor

            weak_label = (pred_tensor > 0.5).long().tolist()
            for i, idx in enumerate(c_idxs):
                data.data_y[idx] = weak_label[i]

class MetaSelfTrainingV3(InstanceReweightingV3, SelfTrainningInterface):
    """Meta self-training with per-instance weights tuned by meta-gradient
    steps against a small few-shot validation ("meta") set."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        # weight_eta : step size for the instance-weight updates.
        # meta_step  : max inner meta-optimisation steps per batch.
        # extra_step : extra steps allowed after the first improving step.
        super(MetaSelfTrainingV3, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size, Inner_BatchSize)
        self.log_dir = log_dir
        fitlog.set_log_dir(log_dir)
        self.suffix = suffix
        self.weight_eta = weight_eta
        self.meta_step = meta_step
        self.extra_step = extra_step
        self.best_valid_acc = 0.0
        self.minMetaLoss = 1e8  # best (lowest) meta-validation loss seen so far
        self.counter, self.valid_counter = 0, 0  # fitlog step counters

    def valid(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate ``model`` on ``test_set`` and log accuracy plus per-class
        precision/recall/F1 to fitlog under ``test_suffix``.

        ``step`` is accepted for call symmetry; logging actually uses the
        internal ``valid_counter``.
        """
        rst_model = self.perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        print("step = {} : ".format(self.valid_counter), rst_model)
        output_items = [("valid_acc", acc_v)] + \
                       [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_metric({f"{test_suffix}": dict(output_items)}, step=self.valid_counter)
        self.valid_counter += 1

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor):
        """One meta-optimisation episode on a single batch.

        Repeatedly: (1) trially update the model with the current instance
        weights, (2) measure loss/acc on the few-shot meta set, (3) restore
        the model and move the weights along the normalised negative
        meta-gradient.  The model always leaves this method with its initial
        parameters restored.

        Returns:
            (steps_taken, best_weights) — ``best_weights`` is None when no
            weight vector improved the global ``minMetaLoss``.
        """
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        # Snapshot parameters so every inner step starts from the same model.
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        weight_best = weight.clone()
        # pre_meta_loss is currently unused; minMetaLoss keeps the entry value
        # so we can tell afterwards whether this episode improved on it.
        pre_meta_loss, minMetaLoss = 1e8, self.minMetaLoss
        optim_step = self.meta_step  # step index of the first improvement
        for step in range(self.meta_step):
            u = weight#.sigmoid()
            model.zero_grad()
            loss = self.LossList(model, batch)  # per-instance losses
            sumLoss = (u * loss).sum()  # instance-weighted training loss
            sumLoss.backward()
            optim.step()
            # Loss/acc (and gradients) of the trially-updated model on the meta set.
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}, PreFewLoss={self.minMetaLoss}")
            if fewLoss < self.minMetaLoss:
                self.minMetaLoss = fewLoss
                weight_best = weight.clone()
                if optim_step == self.meta_step:
                    optim_step = step  # remember the first improving step

            if step == 0:
                # Log only the first inner step so the curve tracks batches.
                fitlog.add_metric({"meta_valid_loss" : fewLoss}, step=self.counter)
                fitlog.add_metric({"meta_valid_acc" : fewAcc}, step=self.counter)
                self.counter += 1

            # Stop `extra_step` steps after the first improvement.
            if step == optim_step + self.extra_step:
                break

            # Restore the model, then take one normalised gradient step on the weights.
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            w_grads = u_grads#*u*(1-u)
            weightGrads = -1 * (w_grads / w_grads.norm(2))
            update = self.weight_eta * weightGrads
            weight = weight - update#*(weight_mask.to(update.device))

        model.load_state_dict(initStateDicts)
        if self.minMetaLoss < minMetaLoss:
        # the global minMetaLoss improved during this episode -> commit weights
            return step+1, weight_best
        else:
            return step+1, None

    def OptimStep(self, model, model_optim, batch, weight):
        """Apply one real optimisation step using the (fixed) instance weights."""
        loss = self.LossList(model, batch)
        # sumLoss = ((weight.sigmoid()) * loss).sum()
        sumLoss = (weight * loss).sum()
        model_optim.zero_grad()
        sumLoss.backward()
        model_optim.step()

    def Training(self, model:RumorDetection, unlabeled_target:BiGCNTwitterSet, valid_set:BiGCNTwitterSet, UT_Label,
                 labeled_source:BiGCNTwitterSet=None, labeled_target:BiGCNTwitterSet=None,
                 max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """Main loop: pseudo-label the target set once, then per batch run a
        MetaStep and, if it improved the meta loss, a real OptimStep.

        The learning rate is doubled when meta convergence is very fast
        (< 3 inner steps) and halved when slow (> 30 steps) or failing.
        """

        model_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        # Paired schedulers on the same optimizer: gamma=2 doubles the lr,
        # gamma=0.5 halves it.
        scheduler_up = torch.optim.lr_scheduler.ExponentialLR(
            model_optim,
            gamma=2
        )
        scheduler_down = torch.optim.lr_scheduler.ExponentialLR(
            model_optim,
            gamma=0.5
        )

        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)

        # Initial weights: 0 for pseudo-labelled/source examples, 10 for the
        # trusted labelled-target examples.
        weights = [0.0]*len(unlabeled_target) + \
                    ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                        ([] if labeled_target is None else [10.0]*len(labeled_target))

        self.train_set_weights = torch.tensor(weights, device=self.device)
        step = 0
        cur_lr4model = self.lr4model
        self.annotate(model, unlabeled_target)  # pseudo-label the target set
        for epoch in range(max_epoch):
            for batch, indices in SimpleLoader(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                weights = self.train_set_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, model_optim, batch, weights)

                if new_weights is not None:
                    # Meta loss improved: commit the weights, take a real step.
                    self.train_set_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                    if meta_step_count < 3:
                        # Converged almost immediately -> lr may be too small;
                        # only raise it back if it was previously lowered.
                        if cur_lr4model < (self.lr4model/2):
                            scheduler_up.step()
                            cur_lr4model = scheduler_up.get_last_lr()[0]
                    elif meta_step_count > 30:
                        scheduler_down.step()
                        cur_lr4model = scheduler_down.get_last_lr()[0]
                else:
                    # No improvement at all -> damp the lr.
                    scheduler_down.step()
                    cur_lr4model = scheduler_down.get_last_lr()[0]

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                step += 1
        model.save_model(model_file)
        if step < max_valid_every:
            # Too few steps for a periodic validation: validate once at the end.
            self.valid(model, unlabeled_target, UT_Label, self.suffix, step)

class MetaSelfTrainingV4(MetaSelfTrainingV3):
    """V3 plus an "expand set": high-weight pseudo-labelled target examples are
    periodically promoted into the meta-validation objective."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainingV4, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model, coeff4expandset,
                                                 max_few_shot_size, Inner_BatchSize, meta_step, extra_step)

    def ConstructExpandData(self, weak_set, expand_idxs):
        """Materialise the expand set from ``weak_set`` at ``expand_idxs`` and
        drop those indices from the pool that SimpleLoader samples from."""
        # Uniform per-class weight over the expand batch.
        self.e_label_weight = torch.tensor([1.0 / len(expand_idxs) for _ in range(self.class_num)])
        # extract out the expand data, chunked to at most max_few_shot_size
        print("- - - - - update expand data - - ->")
        self.expand_data_list = [weak_set.collate_raw_batch(
            [weak_set[idx] for idx in expand_idxs[i:min(i + self.max_few_shot_size, len(expand_idxs))]]
        ) for i in range(0, len(expand_idxs), self.max_few_shot_size)]

        # modify the valid_indexs, so expanded examples are no longer drawn
        # as ordinary training instances
        if not hasattr(weak_set, "valid_indexs"):
            weak_set.valid_indexs = [idx for idx in range(len(weak_set)) if not (idx in expand_idxs)]
        else:
            weak_set.valid_indexs = [idx for idx in weak_set.valid_indexs if not (idx in expand_idxs)]
        self.minMetaLoss = 1e8 # the valid set has changed, so we need to reset the global minMetaLoss

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Accumulate gradients of the meta objective — few-shot set plus, if
        present, the expand set — and return ``(grad_dicts, loss, acc)``.

        NOTE(review): the expand-set terms read ``self.coefff4expandset``
        (three 'f's) while the constructor parameter is ``coeff4expandset`` —
        verify that InstanceReweightingV3 defines the 3-'f' attribute, else
        this raises AttributeError once an expand set exists.
        NOTE(review): ``self.expand_data_list`` is not initialised in
        ``__init__`` here; presumably set by the parent class or by
        ``ConstructExpandData`` before the first call — confirm.
        """
        model.zero_grad()
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            # Single-chunk few-shot set.
            loss, acc = self.lossAndAcc(model, few_shot_data, label_weight=self.f_label_weight, reduction="sum")
            loss.backward()
            loss = loss.data.item()
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                # sum the loss with the averaged weights or the biased weights
                # will allow computing the mean risk on any size of few shot dataset
                f_loss, f_acc = self.lossAndAcc(model, few_data, label_weight=self.f_label_weight, reduction="sum")
                f_loss.backward()
                f_loss_list.append(f_loss.data.item())
                f_acc_list.append(f_acc.item())
                torch.cuda.empty_cache()
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)

        if self.expand_data_list is not None:
            if len(self.expand_data_list) > 0:
                assert hasattr(self, 'e_label_weight')
                print("-------> expand data list ------>")
                e_loss_list, e_acc_list = [], []
                for i, e_data in enumerate(self.expand_data_list):
                    e_loss, e_acc = self.lossAndAcc(model, e_data, label_weight=self.e_label_weight, reduction="sum")
                    # Expand-set loss contributes scaled gradients.
                    (e_loss * self.coefff4expandset).backward()
                    e_loss_list.append(e_loss.data.item())
                    e_acc_list.append(e_acc.item())
                    torch.cuda.empty_cache()
                exp_loss, exp_acc = np.sum(e_loss_list), np.mean(e_acc_list)
                print(f"##Perf on Meta Expand Set##  : exp_loss/exp_acc = {exp_loss}/{exp_acc}")
                grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
                return grad_dicts, \
                       loss + self.coefff4expandset*exp_loss, \
                       acc
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

    def random_select(self, indices, unlabeled_target):
        """Select ~20% of the pseudo-target indices uniformly at random (V4.0)."""
        return [index for index in indices \
                     if (index < len(unlabeled_target)) and (random.random()<0.2)]

    def entrophy_select(self, indices, unlabeled_target):
        """Select the ~20% lowest-entropy (most confident) indices (V4.1).

        NOTE(review): treats ``unlabeled_target.logits`` as probabilities
        (needs values in (0, 1]); confirm the model output is normalised,
        otherwise ``log()`` can produce NaN/negative-entropy artefacts.
        """
        assert hasattr(unlabeled_target, "logits")
        entrophy = (-1 * unlabeled_target.logits[indices] * (unlabeled_target.logits[indices].log())).sum(dim=1)
        topK_idxs = entrophy.argsort()[
                        0 : math.ceil(len(indices)*0.2)
                    ].tolist()
        return [indices[idx] for idx in topK_idxs]

    def weights_select(self, indices):
        """Select the ~20% of indices with the largest learned weights (V4.2)."""
        topK_idxs = self.train_set_weights[indices].topk(
                        math.ceil(len(indices)*0.2)
                    )[1].tolist()
        return [indices[idx] for idx in topK_idxs]

    def mixed_select(self, indices, unlabeled_target):
        """Rank-average of low entropy and high weight, take the top ~20% (V4.4)."""
        entrophy = (-1 * unlabeled_target.logits[indices] * (unlabeled_target.logits[indices].log())).sum(dim=1)
        # Low-entropy rank (reversed) averaged with high-weight rank.
        score = (len(indices) - 1 - entrophy.argsort() + self.train_set_weights[indices].argsort()) / 2.0
        topK_idxs = score.topk(
                        math.ceil(len(indices)*0.2)
                    )[1].tolist()
        return [indices[idx] for idx in topK_idxs]

    def Training(self, model:RumorDetection, unlabeled_target:BiGCNTwitterSet, valid_set:BiGCNTwitterSet, UT_Label,
                 labeled_source:BiGCNTwitterSet=None, labeled_target:BiGCNTwitterSet=None,
                 max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """V3 training loop plus expand-set maintenance: confident examples are
        collected every batch and, once 100 accumulate, 100 of them are sampled
        into a new expand set (the previous expand set is released back into
        the training pool)."""

        model_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        # Paired schedulers: gamma=2 doubles the lr, gamma=0.5 halves it.
        scheduler_up = torch.optim.lr_scheduler.ExponentialLR(
            model_optim,
            gamma=2
        )
        scheduler_down = torch.optim.lr_scheduler.ExponentialLR(
            model_optim,
            gamma=0.5
        )
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        # Initial weights: 0 for pseudo/source examples, 10 for labelled target.
        weights = [0.0]*len(unlabeled_target) + \
                    ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                        ([] if labeled_target is None else [10.0]*len(labeled_target))
        self.train_set_weights = torch.tensor(weights, device=self.device)
        step = 0
        expand_idxs_Pre, expand_idxs_Cur = [], []
        cur_lr4model = self.lr4model
        self.annotate(model, unlabeled_target)  # pseudo-label the target set
        for epoch in range(max_epoch):
            for batch, indices in SimpleLoader(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                weights = self.train_set_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, model_optim, batch, weights)

                if new_weights is not None:
                    # Meta loss improved: commit the weights, take a real step.
                    self.train_set_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                    if meta_step_count < 3:
                        if cur_lr4model < (self.lr4model/2):
                            scheduler_up.step()
                            cur_lr4model = scheduler_up.get_last_lr()[0]
                    elif meta_step_count > 30:
                        scheduler_down.step()
                        cur_lr4model = scheduler_down.get_last_lr()[0]
                else:
                    scheduler_down.step()
                    cur_lr4model = scheduler_down.get_last_lr()[0]

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                else:
                    print(f"step = {step}")
                # Collect expand-set candidates with the active strategy.
                expand_idxs_Cur.extend(
                    # self.random_select(indices, unlabeled_target) # V4.0
                    # self.entrophy_select(indices, unlabeled_target) # V4.1
                    self.weights_select(indices) #V4.2
                    # self.mixed_select(indices, unlabeled_target) #V4.4
                )
                print(f"expand_idxs size : {len(expand_idxs_Cur)}")
                if len(expand_idxs_Cur) >= 100:
                    # Release the previous expand set back to the training pool,
                    # then promote a 100-sample of the current candidates.
                    unlabeled_target.valid_indexs.extend(expand_idxs_Pre)
                    expand_idxs_Cur = random.sample(expand_idxs_Cur, 100)
                    expand_idxs_Pre = expand_idxs_Cur
                    self.ConstructExpandData(unlabeled_target, expand_idxs_Cur)
                    expand_idxs_Cur = []
                step += 1
        model.save_model(model_file)
        if step < max_valid_every:
            self.valid(model, unlabeled_target, UT_Label, self.suffix, step)

class MetaSelfTrainingV5(MetaSelfTrainingV4):
    """V4 with class-balanced (inverse-frequency) label weights for both the
    few-shot set and the expand set, instead of uniform weights."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainingV5, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model, coeff4expandset,
                                                 max_few_shot_size, Inner_BatchSize, meta_step, extra_step)


    def FewShotDataList(self, few_shot_set):
        """Chunk the few-shot set into collated batches of at most
        ``max_few_shot_size`` and compute inverse-frequency class weights.

        Returns:
            (few_shot_data, few_shot_data_list) — exactly one is non-None,
            depending on whether the set fits in a single chunk.
        """
        # data_y rows act as one-hot labels; argmax recovers the class index.
        f_label = torch.tensor(few_shot_set.data_y).argmax(dim=1)
        cnt = [f_label.__eq__(y_idx).sum().data.item() for y_idx in range(self.class_num)]
        assert sum(cnt) == len(few_shot_set)
        # Inverse class frequency: rarer classes weigh more.
        self.f_label_weight = torch.tensor([1.0/y_cnt for y_cnt in cnt], device=self.device)

        if len(few_shot_set) > self.max_few_shot_size:
            few_shot_data = None
            few_shot_data_list = [few_shot_set.collate_raw_batch(
                                            [few_shot_set[j] for j in range(i,
                                                                                 min(i+self.max_few_shot_size,
                                                                                     len(few_shot_set)))])
                                            for i in range(0, len(few_shot_set), self.max_few_shot_size)]
        else:
            few_shot_data = few_shot_set.collate_raw_batch(
                [few_shot_set[i] for i in range(len(few_shot_set))]
            )
            few_shot_data_list = None
        return few_shot_data, few_shot_data_list

    def ConstructExpandData(self, weak_set, expand_idxs):
        """Build a class-balanced expand set; bail out (expand_data_list=None)
        when any class is absent from the selected examples."""

        e_label = torch.tensor(weak_set.data_y)[expand_idxs].argmax(dim=1)
        cnt = [e_label.__eq__(y_idx).sum().data.item() for y_idx in range(self.class_num)]
        assert sum(cnt) == len(e_label)
        if not all(cnt): # some class are missed in the expand data
            self.expand_data_list = None
            return
        # Inverse class frequency.  The `else 0.0` guard is unreachable here
        # since the all(cnt) check above guarantees every count is positive.
        self.e_label_weight = torch.tensor([1.0 / y_cnt if y_cnt > 0 else 0.0 for y_cnt in cnt], device=self.device)
        # self.e_label_weight = torch.tensor([1.0/len(expand_idxs) for _ in range(self.class_num)]) #V5.0

        # extract out the expand data, chunked to at most max_few_shot_size
        print("- - - - - update expand data - - ->")
        self.expand_data_list = [weak_set.collate_raw_batch(
            [weak_set[idx] for idx in expand_idxs[i:min(i + self.max_few_shot_size, len(expand_idxs))]]
        ) for i in range(0, len(expand_idxs), self.max_few_shot_size)]

        # modify the valid_indexs,
        # NOTE(review): unlike V4 (which updates weak_set.valid_indexs), this
        # stores valid_indexs on the trainer itself, so SimpleLoader — which
        # reads the dataset's valid_indexs — never sees the exclusion.
        # Confirm whether this is intentional or should be weak_set.valid_indexs.
        if not hasattr(self, "valid_indexs"):
            self.valid_indexs = [idx for idx in range(len(weak_set)) if not (idx in expand_idxs)]
        else:
            self.valid_indexs = [idx for idx in self.valid_indexs if not (idx in expand_idxs)]
        self.minMetaLoss = 1e8 # the valid set has changed, so we need to reset the global minMetaLoss

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix, lt_cnt=0):
    """Load the few-shot, old-domain and new-domain Twitter datasets.

    When ``lt_cnt`` > 0 the new-domain set is split so that roughly
    ``lt_cnt`` examples form a labelled-target subset.

    Returns:
        (few_shot_set, old_domain_set, new_domain_set, labelled_target_or_None)
    """
    def _load(prefix):
        # Each dataset is deserialised from its pre-processed file prefix.
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        return dataset

    fs_set = _load(fs_prefix)
    od_set = _load(od_prefix)
    nd_set = _load(nd_prefix)
    if lt_cnt <= 0:
        return fs_set, od_set, nd_set, None
    labeled_part, remaining_part = nd_set.split([lt_cnt * 1.0 / len(nd_set), 1.0])
    return fs_set, od_set, remaining_part, labeled_part


def obtain_model(tfidf_vec) -> BiGCNRumorDetec:
    """Assemble the TF-IDF-based BiGCN rumor detector.

    Pipeline: TF-IDF sentence embedder (300-d GloVe space) -> BiGCN
    propagation (300 -> 256) -> linear 2-way classifier over the 1024-d
    graph representation.
    """
    embedder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True, grad_preserve=True,
    )
    propagator = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(embedder, propagator, classifier,
                           batch_size=20, grad_accum_cnt=1)


if __name__ == "__main__":
    import shutil  # stdlib; used below to reset the log directory

    # Pre-trained TFIDF-BiGCN checkpoints, one per source event
    # (the filename suffix records the checkpoint's accuracy).
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.72.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                   ]

    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.76.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                   ]

    # Log directory named after this script.  os.path.splitext removes exactly
    # the ".py" extension; the previous str(__file__).rstrip(".py") stripped
    # any trailing '.', 'p', 'y' characters and could mangle the name.
    logDir = os.path.splitext(str(__file__))[0]
    # logDir = "OnlineTest"
    if os.path.exists(logDir):
        shutil.rmtree(logDir)  # start each run from a clean log directory
    os.makedirs(logDir)

    fewShotCnt = 20
    domainID = 4  # index into the checkpoint lists and data-prefix names
    validTarget, labeledSource, unlabeledTarget, labeledTarget = obtain_Domain_set(
                                                f"../../data/twitter_fs{domainID}_{fewShotCnt}",
                                                f"../../data/twitter_od{domainID}_{fewShotCnt}",
                                                f"../../data/twitter_nd{domainID}_{fewShotCnt}"
                                            )

    # Load a cached TF-IDF vectorizer, or fit one over every tweet text.
    TfIdf_twitter_file = "../../saved/TfIdf_twitter.pkl"
    if os.path.exists(TfIdf_twitter_file):
        with open(TfIdf_twitter_file, "rb") as fr:
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        # labeledTarget is None when obtain_Domain_set is called without
        # lt_cnt (as above) — skip missing datasets instead of crashing.
        corpus = [" ".join(lemma(txt)) for data in [validTarget, labeledSource, unlabeledTarget, labeledTarget]
                                        if data is not None
                                        for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(TfIdf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

    model1 = obtain_model(tv)
    model1.load_model(BiGCN1_Paths[domainID])
    # model1.load_state_dict(torch.load(BiGCN1_Paths[domainID]))
    newDomainName = unlabeledTarget.data[unlabeledTarget.data_ID[0]]['event']
    # One-hot data_y -> class-index labels for evaluation.
    ut_label = torch.tensor(unlabeledTarget.data_y).argmax(dim=1)
    trainer = MetaSelfTrainingV4(class_num=2, log_dir=logDir, suffix=f"{newDomainName}_FS{fewShotCnt}", weight_eta=0.2,
                                 lr4model=5e-5, coeff4expandset=0.1,  max_few_shot_size=20, Inner_BatchSize=20, meta_step=50)
    trainer.valid(model1, unlabeledTarget, ut_label, trainer.suffix, 0)
    # Alternating rounds: every Training call re-annotates the target set and
    # then trains for 5 epochs.
    for _ in range(20):
        trainer.Training(model1, unlabeledTarget, validTarget, ut_label, None, labeledTarget,
                         max_epoch=5, max_valid_every=30)