import sys
sys.path.append("../../")
sys.path.append("../")

import os
import pickle
import random
import shutil

import fitlog
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score, precision_recall_fscore_support
from torch.utils.data import DataLoader

from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import MetaEvaluator, WeightedAcc

def pred_Logits(model: RumorDetection, data, idxs=None, batch_size=20):
    """Run `model` over `data` in mini-batches and return the stacked outputs.

    Args:
        model: detector exposing `forward(batch)`; gradients are disabled.
        data: dataset supporting `__getitem__`, `__len__` and
            `collate_raw_batch(list_of_items)`.
        idxs: dataset positions to score; all positions when None.
        batch_size: number of instances scored per forward pass.

    Returns:
        Tensor of per-instance outputs, concatenated along dim 0 in the
        order of `idxs`.
    """
    if idxs is None:
        idxs = list(range(len(idxs := list(range(len(data))))) ) if False else list(range(len(data)))
    preds = []
    with torch.no_grad():  # inference only; no autograd bookkeeping
        for start in range(0, len(idxs), batch_size):
            # Slicing clamps at the sequence end, no explicit min() needed.
            batch_idxs = idxs[start:start + batch_size]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            preds.append(model.forward(batch))
    return torch.cat(preds)

def prediction(model: RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted class, confidence) for each instance in `data[idxs]`.

    Uses `max` over the class dimension. For the 2-class outputs used in this
    file this is equivalent to the previous ascending-sort/column-1 trick, but
    it also stays correct if the classifier ever emits more than two classes.
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    conf, pred = pred_tensor.max(dim=1)
    return pred, conf

def expandPseaudoSet(model1, model2, unlabeled, skip_idxs=None, threshold=0.95, max_cnt=50):
    """Pick unlabeled instances both models agree on with high confidence.

    Args:
        model1, model2: the two co-trained detectors.
        unlabeled: target-domain dataset.
        skip_idxs: positions already pseudo-labelled (excluded from scoring).
        threshold: per-model confidence floor.
        max_cnt: cap on the number of returned indices; when exceeded, keep
            the instances with the highest harmonic mean of the two
            confidences.

    Returns:
        List of dataset positions to add to the pseudo-labelled pool.
    """
    if skip_idxs is None:
        candidates = list(range(len(unlabeled)))
    else:
        candidates = list(set(range(len(unlabeled))) - set(skip_idxs))
    pred_1, conf_1 = prediction(model1, unlabeled, candidates)
    pred_2, conf_2 = prediction(model2, unlabeled, candidates)
    # Use plain operators instead of explicit __eq__/__gt__ dunder calls.
    agree = pred_1 == pred_2
    confident = agree & (conf_1 > threshold) & (conf_2 > threshold)
    expand_idxs = torch.tensor(candidates, device=confident.device)[confident]
    if len(expand_idxs) > max_cnt:
        # Harmonic mean of the two confidences; keep the top max_cnt.
        conf_f1 = 2 * conf_2 * conf_1 / (conf_2 + conf_1)
        top = conf_f1[confident].argsort()[-max_cnt:]
        expand_idxs = expand_idxs[top]
    return expand_idxs.tolist()

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, (precision, recall, f1, support)) for `y_pred`.

    `y_pred` is moved to CPU before scoring; `y_true` is assumed to already
    live on the host.
    """
    y_pred_host = y_pred.cpu()
    acc = accuracy_score(y_true, y_pred_host)
    prfs = precision_recall_fscore_support(y_true, y_pred_host)
    return acc, prfs

def Perf(model: RumorDetection, data, label, idxs=None, batch_size=20):
    """Score `model` on `data[idxs]` against ground-truth `label[idxs]`.

    Returns the (accuracy, (P, R, F1, support)) tuple from acc_P_R_F1.
    """
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        y_true = label
    else:
        y_true = label[idxs]
    return acc_P_R_F1(y_true, y_pred)

def WeakLabeling(model:RumorDetection, data, pseaudo_idxs=[], batch_size=20):
    """Relabel every instance outside `pseaudo_idxs` with the model's output.

    Overwrites data.data_y[idx] in place with a per-class 0/1 list obtained by
    thresholding the model output at 0.5 (presumably the output rows are
    class probabilities -- TODO confirm model.forward is softmax-normalised).

    Returns:
        entrophy: tensor of length len(data); for relabelled rows the value
            sum(|log p| * p), which equals the Shannon entropy -sum(p log p)
            when p in (0, 1]; zero for rows kept from `pseaudo_idxs`.
        preds[:, 1], confs[:, 1]: column 1 of the ascending sort, i.e. the
            argmax index / max value per row for a 2-class output.
            NOTE(review): these have length len(c_idxs), NOT len(data) --
            they are aligned with the relabelled rows only; callers must not
            index them by raw dataset position.
    """
    # Rows that already carry trusted pseudo-labels are left untouched.
    c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    # Ascending sort: for 2 classes, column 1 holds the top-1 value/index.
    confs, preds = pred_tensor.sort(dim=1)
    weak_label = (pred_tensor > 0.5).long().tolist()
    # In-place mutation of the dataset's label store.
    for i, idx in enumerate(c_idxs):
        data.data_y[idx] = weak_label[i]
    entrophy = torch.zeros([len(data)], device=model.device)
    # |log p| * p == -p log p for p in (0, 1].
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    return entrophy, preds[:, 1], confs[:, 1]

def obtain_model(tfidf_vec):
    """Assemble a BiGCN rumor detector on top of a fitted TF-IDF vectorizer."""
    sentence_encoder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sentence_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def AugLoss(model: BiGCNRumorDetec, batch):
    """NLL loss and accuracy of the model's augmented prediction on `batch`."""
    probs = model.AugPredict(batch)
    # Nudge probabilities off exact 0/1 so that log() never produces
    # non-finite values (the original [1.0, 0.0] case).
    eps = torch.ones_like(probs) * 1e-8
    probs = (probs - eps).abs()
    labels = batch[-2].to(model.device)
    loss = F.nll_loss(probs.log(), labels)
    labels_host = labels.cpu().numpy()
    preds_host = probs.argmax(dim=1).cpu().numpy()
    acc = accuracy_score(labels_host, preds_host)
    return loss, acc

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain datasets.

    Each prefix is passed to BiGCNTwitterSet.load_data_fast; the three
    datasets are returned in the same order as the arguments.
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        loaded.append(dataset)
    return loaded[0], loaded[1], loaded[2]

def balancedTrainingIter(weak_set, batch_size, valid_idxs):
    """Yield class-balanced batches from `weak_set[valid_idxs]`.

    The minority class is padded with same-label instances sampled from the
    module-level `old_domain` dataset. Each yielded batch contains up to
    batch_size//2 majority and batch_size//2 minority instances.

    NOTE(review): depends on the global `old_domain` defined at module level;
    confirm it is loaded before this generator is consumed.

    BUGFIXES vs the original:
      * old-domain indices are now encoded as len(weak_set) + j, so they can
        never collide with a genuine weak-set index (the original offset them
        by max(minor idxs), colliding at exactly that value);
      * when negatives are the majority, the original computed the offset
        from max(neg_idxs) although the minority list holds positive
        indices, misrouting high positive indices into old_domain.
    """
    valid_idxs = valid_idxs if isinstance(valid_idxs, torch.Tensor) else torch.tensor(valid_idxs)
    labels = torch.tensor(weak_set.data_y).argmax(dim=1)[valid_idxs]
    pos_idxs = valid_idxs[labels == 1].tolist()
    neg_idxs = valid_idxs[labels == 0].tolist()
    print(f"init ratio: {len(pos_idxs) * 1.0 / len(neg_idxs)}")
    source_labels = torch.tensor(old_domain.data_y).argmax(dim=1)
    # Source indices live in [base, base + len(old_domain)); weak-set
    # indices are always < base == len(weak_set).
    base = len(weak_set)
    source_pool = torch.arange(len(old_domain)) + base
    if len(pos_idxs) > len(neg_idxs):
        major_idxs = random.sample(pos_idxs, len(pos_idxs))  # shuffle
        pad_pool = source_pool[source_labels == 0].tolist()
        minor_idxs = neg_idxs + random.sample(pad_pool, len(pos_idxs) - len(neg_idxs))
    else:
        major_idxs = random.sample(neg_idxs, len(neg_idxs))  # shuffle
        pad_pool = source_pool[source_labels == 1].tolist()
        minor_idxs = pos_idxs + random.sample(pad_pool, len(neg_idxs) - len(pos_idxs))
    half = batch_size // 2
    for i in range(0, len(major_idxs), half):
        major_instances = [weak_set[j] for j in major_idxs[i:i + half]]
        minor_instances = [old_domain[j - base] if j >= base else weak_set[j]
                           for j in minor_idxs[i:i + half]]
        yield weak_set.collate_raw_batch(major_instances + minor_instances)

class MetaSelfTrainer(MetaEvaluator):
    """Self-training driven by meta-evaluation.

    Each round uses the base class's PopOut to select reliable weakly
    labelled instances, then fine-tunes the model on the selected plus
    pseudo-labelled instances (plain or class-balanced batches).
    """

    def __init__(self, model: RumorDetection, weak_set, few_shot_set,
                weak_set_label, exp_idxs=[], weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=100,
                batch_size=5):
        # NOTE(review): exp_idxs=[] is a shared mutable default; kept as-is
        # because the base class may hold a reference to it -- confirm before
        # replacing with a None sentinel.
        super(MetaSelfTrainer, self).__init__(model, weak_set, few_shot_set,
                                               weak_set_label, exp_idxs, weak_set_weights,
                                               convey_fn, lr4model, scale_lr4model, coeff4expandset,
                                               max_few_shot_size, batch_size)
        self.expand_batch = []

    def LogSelectionInfo(self, e_arr, valid_idxs=None):
        """Print entropy/accuracy/P/R/F1 over all instances vs the selected subset."""
        indices = torch.arange(len(self.weak_set))
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        # Fall back to positively-weighted instances when no explicit subset.
        pos_indices = valid_idxs if valid_idxs is not None else indices[self.weak_set_weights > 0.0]
        labels = self.weak_set_label[indices]
        preds = torch.tensor(self.weak_set.data_y)[indices].argmax(dim=1)
        print(len(indices))
        print(len(pos_indices))
        print(e_arr.mean(), e_arr[pos_indices].mean())
        print(accuracy_score(labels, preds), accuracy_score(labels[pos_indices], preds[pos_indices]))
        print(precision_score(labels, preds), precision_score(labels[pos_indices], preds[pos_indices]))
        print(recall_score(labels, preds), recall_score(labels[pos_indices], preds[pos_indices]))
        print(f1_score(labels, preds), f1_score(labels[pos_indices], preds[pos_indices]))
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")

    def _select_valid_idxs(self, max_meta_steps, lr4weights, pseaudo_idxs,
                           meta_lr4model, meta_scale_lr4model):
        """Run PopOut under temporary meta learning rates; restore them after."""
        saved = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        try:
            valid_idxs = self.PopOut(max_epochs=1, max_meta_steps=max_meta_steps,
                                     lr4weights=lr4weights, pseaudo_idxs=pseaudo_idxs,
                                     pop_ratio=0.2)  # 0.1 on ferguson, 0.05 on sydney
        finally:
            self.lr4model, self.scale_lr4model = saved
        return valid_idxs

    def _optimize_on(self, batch):
        """One gradient step on `batch`; returns (loss tensor, accuracy)."""
        cost, acc = AugLoss(self.model, batch)
        self.model.zero_grad()
        self.model_optim.zero_grad()
        cost.backward()
        self.model_optim.step()
        torch.cuda.empty_cache()
        return cost, acc

    def _report_train_set(self, entrophys, valid_idxs, train_idxs):
        """Log the selection statistics and the weak-label accuracy on train_idxs."""
        self.LogSelectionInfo(entrophys, valid_idxs=valid_idxs)
        labels = self.weak_set_label[train_idxs]
        preds = torch.tensor(self.weak_set.data_y)[train_idxs].argmax(dim=1)
        print("trainSet perf:", acc_P_R_F1(labels, preds))

    def Training(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
                    lr4weights=0.1, meta_lr4model=1e-1, meta_scale_lr4model=5e-3,
                        pseaudo_idxs=[]):
        """Select instances via PopOut, then train on them with plain batches.

        Returns the list of selected (valid) indices; empty when PopOut keeps
        nothing.
        """
        valid_idxs = self._select_valid_idxs(max_meta_steps, lr4weights, pseaudo_idxs,
                                             meta_lr4model, meta_scale_lr4model)
        if len(valid_idxs) == 0:
            print("===> no valid idxs")
            return valid_idxs
        self.batch_size = batch_size
        train_idxs = valid_idxs + pseaudo_idxs
        self._report_train_set(entrophys, valid_idxs, train_idxs)
        for epoch in range(max_epoch):
            sum_loss = 0.
            # Reshuffle the training order every epoch.
            train_idxs = random.sample(train_idxs, len(train_idxs))
            for start in range(0, len(train_idxs), self.batch_size):
                # BUGFIX: index the weak set through train_idxs; the original
                # iterated raw dataset positions 0..len(train_idxs)-1, so the
                # selected instances were never the ones trained on.
                batch = self.weak_set.collate_raw_batch([
                    self.weak_set[idx] for idx in train_idxs[start:start + self.batch_size]
                ])
                cost, acc = self._optimize_on(batch)
                print('####Model Update (%3d | %3d) ####, loss = %6.8f, acc = %6.8f' % (
                    start, len(train_idxs), cost.data, acc
                ))
                sum_loss += cost.data
            mean_loss = (sum_loss * 1.0) / ((len(train_idxs) // self.batch_size) + 1)
            print("mean loss:", mean_loss)
            if mean_loss < 0.1:  # early stop
                break
        return valid_idxs

    def BalancedTraining(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
                            lr4weights=0.1, meta_lr4model=1e-1, meta_scale_lr4model=5e-3,
                                pseaudo_idxs=[]):
        """Like Training, but draws class-balanced batches and caps the number
        of pseudo-labelled instances per round to keep the batches diverse.
        """
        valid_idxs = self._select_valid_idxs(max_meta_steps, lr4weights, pseaudo_idxs,
                                             meta_lr4model, meta_scale_lr4model)
        if len(valid_idxs) == 0:
            print("===> no valid idxs")
            return valid_idxs
        self.batch_size = batch_size
        # Cap the pseudo-labelled portion to keep training-instance diversity.
        max_pseudo_instances = min(200, len(pseaudo_idxs))
        train_idxs = valid_idxs + random.sample(pseaudo_idxs, max_pseudo_instances)
        self._report_train_set(entrophys, valid_idxs, train_idxs)
        for epoch in range(max_epoch):
            start = 0
            sum_loss = 0.
            for batch in balancedTrainingIter(self.weak_set, self.batch_size, train_idxs):
                cost, acc = self._optimize_on(batch)
                print('####Model Update (%3d | %3d) ####, loss = %6.8f, acc = %6.8f' % (
                    start, len(train_idxs), cost.data, acc
                ))
                sum_loss += cost.data
                start += self.batch_size
            mean_loss = (sum_loss * 1.0) / ((len(train_idxs) // self.batch_size) + 1)
            print("mean loss:", mean_loss)
            if mean_loss < 0.1:  # early stop
                break
        return valid_idxs


def test_model(model, test_set, test_label, test_suffix, step=0):
    """Evaluate `model` on `test_set` and log class-1 metrics to fitlog."""
    result = Perf(model, test_set, test_label)
    acc_v, (p_v, r_v, f1_v, _) = result
    print(f"Original Performance of {test_suffix}:", result)
    metrics = {"valid_acc": acc_v, "valid_prec": p_v[1],
               "valid_recall": r_v[1], "valid_f1": f1_v[1]}
    fitlog.add_best_metric({f"Original_{test_suffix}": dict(metrics)})
    fitlog.add_metric({f"{test_suffix}": dict(metrics)}, step=step)

def train_loop(model, optim, dataset, valid_indices, max_epoch, batch_size):
    """Train `model` with `optim` on balanced batches over dataset[valid_indices].

    Stops early once the mean epoch loss drops below 0.2.
    """
    for epoch in range(max_epoch):
        start = 0
        sum_loss = 0.
        counter = 0
        # BUGFIX: balancedTrainingIter yields a single batch per step, not an
        # (idxs, batch) pair; the original two-target unpack failed at runtime.
        for batch in balancedTrainingIter(dataset, batch_size, valid_indices):
            cost, acc = AugLoss(model, batch)
            model.zero_grad()
            optim.zero_grad()
            cost.backward()
            optim.step()
            torch.cuda.empty_cache()
            print('####Model Update (%3d) ####, loss = %6.8f, acc = %6.8f' % (
                start, cost.data, acc
            ))
            sum_loss += cost.data
            start += batch_size
            counter += 1
        # Guard against an empty iterator (no batches -> no division by zero).
        mean_loss = (sum_loss * 1.0) / max(counter, 1)
        print("mean loss:", mean_loss)
        if mean_loss < 0.2:  # early stop
            break

def _seed_everything(seed):
    """Seed Python and torch RNGs and force deterministic cuDNN."""
    # The original seeded `random` twice with the same value; once suffices.
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

def _make_optimizer(model):
    """Adam with per-component learning rates for a BiGCN detector."""
    return torch.optim.Adam([
        {'params': model.sent2vec.parameters(), 'lr': 5e-5},
        {'params': model.prop_model.parameters(), 'lr': 2e-5},
        {'params': model.rdm_cls.parameters(), 'lr': 2e-5},
    ])

def ModelTrain(unlabeled_set, model1, model2, max_epoch, valid_indices, seed1=1, seed2=2):
    """Train both co-training models on the pseudo-labelled subset.

    Each model is trained under its own seed so the two runs diverge
    deterministically.
    """
    _seed_everything(seed1)
    print("========train model 1=====>")
    train_loop(model1, _make_optimizer(model1),
               unlabeled_set, valid_indices,
               max_epoch, 32)

    _seed_everything(seed2)
    print("========train model 2=====>")
    train_loop(model2, _make_optimizer(model2),
               unlabeled_set, valid_indices,
               max_epoch, 32)

def MetaSelfTrain(tr_model, anno_model, f_set, weak_set, weak_set_label, p_idxs, e_idxs):
    """One co-training round: annotate with `anno_model`, train `tr_model`.

    Side effects: WeakLabeling overwrites weak_set.data_y in place, and
    `p_idxs` is extended in place with the newly expanded pseudo indices.

    Returns:
        (valid_idxs, p_idxs): the instances selected by meta-evaluation this
        round, and the (mutated) pseudo-labelled index pool.
    """
    entrophy, preds, logits = WeakLabeling(anno_model, weak_set, pseaudo_idxs=p_idxs + e_idxs)
    new_pseudo = expandPseaudoSet(tr_model, anno_model, weak_set, p_idxs + e_idxs, threshold=0.95)
    p_idxs.extend(new_pseudo)
    trainer = MetaSelfTrainer(tr_model, weak_set, f_set, weak_set_label,
                              exp_idxs=e_idxs, convey_fn=None, lr4model=5e-2,
                              scale_lr4model=4e-2, max_few_shot_size=100, batch_size=20)
    max_meta_steps = 10
    if len(e_idxs) > 100:
        print("expand_idxs length:", len(e_idxs))
        trainer.ConstructExpandData(batch_size=100)
        max_meta_steps = 5
    valid_idxs = trainer.BalancedTraining(entrophy, max_epoch=10, batch_size=32,
                                          max_meta_steps=max_meta_steps,
                                          lr4weights=0.1, meta_lr4model=2e-2,
                                          meta_scale_lr4model=2e-3,
                                          pseaudo_idxs=p_idxs)
    if len(valid_idxs) == 0:
        return valid_idxs, p_idxs

    rst_model1 = Perf(tr_model, weak_set, weak_set_label)
    # BUGFIX: the old message was a %-format string ("%3d | %3d ...") printed
    # literally with no arguments; drop the stray placeholders.
    print("Post-MetaTrain Performance of model1:", rst_model1)
    pseaudo_labels = torch.tensor(weak_set.data_y).argmax(dim=1)
    rst_s = acc_P_R_F1(weak_set_label[valid_idxs],
                       pseaudo_labels[valid_idxs])
    print("###Accuracy On selected instancees", rst_s)
    rst_p = acc_P_R_F1(weak_set_label[p_idxs],
                       pseaudo_labels[p_idxs])
    print("###Accuracy On pseaudo instances", rst_p)
    rst_t = acc_P_R_F1(weak_set_label[p_idxs + valid_idxs],
                       pseaudo_labels[p_idxs + valid_idxs])
    print("###Accuracy On training instances", rst_t)
    return valid_idxs, p_idxs

# Checkpoints for the two co-trained BiGCN models, one per source event,
# indexed by domain_ID. The numeric suffix in each filename is presumably the
# checkpoint's validation accuracy -- TODO confirm against the training logs.
BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.74.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
               ]

# Second model's checkpoints; deliberately different snapshots of the same
# events so the two models disagree enough for co-training.
BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
               ]
domain_ID = 0
few_shot_cnt = 40
# Few-shot / old-domain / new-domain splits for the chosen target event.
few_shot_set, old_domain, new_domain = obtain_Domain_set(
    f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
    f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
    f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}",
)

Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    # Reuse the cached, already-fitted vectorizer.
    with open(Tf_Idf_twitter_file, "rb") as cache_in:
        tv = pickle.load(cache_in)
else:
    # Fit TF-IDF on lemmatized text from all three splits, then cache it.
    lemma = Lemma_Factory()
    corpus = [
        " ".join(lemma(txt))
        for data in (few_shot_set, old_domain, new_domain)
        for ID in data.data_ID
        for txt in data.data[ID]['text']
    ]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as cache_out:
        pickle.dump(tv, cache_out, protocol=pickle.HIGHEST_PROTOCOL)


model1 = obtain_model(tv)
model2 = obtain_model(tv)

# BUGFIX: str.rstrip(".py") strips ANY trailing '.', 'p' or 'y' characters
# (e.g. "copy.py" -> "co"); splitext removes only the extension.
log_dir = os.path.splitext(str(__file__))[0]
# log_dir = "MetaSelfTrain_0"
# Start every run from an empty log directory; shutil/os replace the shelled
# "mkdir"/"rm -rf" calls (portable, no shell injection surface).
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)


fitlog.set_log_dir("%s/" % log_dir, new_log=True)

new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']
new_domain_label = torch.tensor(new_domain.data_y).argmax(dim=1)

# Warm-start both models from their pre-trained source-domain checkpoints.
model1.load_model(BiGCN1_Paths[domain_ID])
model2.load_model(BiGCN2_Paths[domain_ID])

pseaudo_idxs = []
unlabeled_set = new_domain

# Baseline performance on the target domain before any self-training.
test_model(model1, unlabeled_set, new_domain_label, "model1")
test_model(model2, unlabeled_set, new_domain_label, "model2")

train_iter = 0
for epoch in range(10):
    for _ in range(60):
        train_iter += 1
        # Round A: model2 annotates, model1 trains.
        v_idxs, pseaudo_idxs = MetaSelfTrain(model1, model2, few_shot_set, unlabeled_set,
                                             new_domain_label, pseaudo_idxs, [])
        remaining = len(unlabeled_set) - len(set(pseaudo_idxs))
        if remaining * 1.0 / len(unlabeled_set) < 0.05:
            break

        # Round B: roles swapped -- model1 annotates, model2 trains.
        v_idxs, pseaudo_idxs = MetaSelfTrain(model2, model1, few_shot_set, unlabeled_set,
                                             new_domain_label, pseaudo_idxs, [])
        remaining = len(unlabeled_set) - len(set(pseaudo_idxs))
        if remaining * 1.0 / len(unlabeled_set) < 0.05:
            break

    weak_label = torch.tensor(unlabeled_set.data_y).argmax(dim=1)
    print("#####pseaudo perf:", acc_P_R_F1(new_domain_label[pseaudo_idxs], weak_label[pseaudo_idxs]))
    # Retrain both models from the accumulated pseudo-labels, then reset the pool.
    ModelTrain(unlabeled_set, model1, model2, max_epoch=20,
               valid_indices=pseaudo_idxs,
               seed1=10086, seed2=10010)
    pseaudo_idxs = []