import sys
sys.path.append("../../")
sys.path.append("../")

# Standard library
import math
import os
import pickle
import random
import shutil

# Third-party
import fitlog
import numpy as np
import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score, precision_recall_fscore_support
from torch.utils.data import DataLoader, WeightedRandomSampler

# Project-local (resolved via the sys.path tweaks above)
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import MetaEvaluatorV2, WeightedAcc

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run `model` over `data` in mini-batches (no grad) and return the
    stacked prediction tensor.

    Args:
        model: detector exposing `forward(batch)`.
        data: dataset exposing `__len__`, `__getitem__` and `collate_raw_batch`.
        idxs: subset of dataset indices to score; None means the whole dataset.
        batch_size: number of samples per forward pass.

    Returns:
        Tensor of predictions, rows ordered as `idxs`.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():
        # Python slicing already clamps at the sequence end, so no min() needed.
        for start in range(0, len(idxs), batch_size):
            selected = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in selected])
            chunks.append(model.forward(raw_batch))
    return torch.cat(chunks)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Predict class labels and their confidence for `data`.

    Returns:
        (labels, confs): per-sample argmax class id and the corresponding
        prediction score.

    Fix: the previous sort-based extraction (`sort(dim=1)` then column 1) was
    only correct for exactly two classes; `max(dim=1)` is identical for the
    binary case and generalizes to any class count.
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    confs, labels = pred_tensor.max(dim=1)
    return labels, confs

def expandPseaudoSet(model1, model2, unlabeled, skip_idxs=None, threshold=0.95, max_cnt=50):
    """Select up to `max_cnt` unlabeled samples on which both models agree
    with confidence above `threshold`.

    Args:
        model1, model2: the two co-training detectors.
        unlabeled: unlabeled dataset to expand from.
        skip_idxs: indices to exclude (e.g. already pseudo-labeled); None = none.
        threshold: minimum per-model confidence.
        max_cnt: cap on the number of returned indices.

    Returns:
        List of selected dataset indices.

    Fix: the candidate index computation previously read the module-level
    `unlabeled_set` global instead of the `unlabeled` argument.
    """
    if skip_idxs is None:
        c_idxs = list(range(len(unlabeled)))
    else:
        c_idxs = list(set(range(len(unlabeled))) - set(skip_idxs))
    pred_1, conf_1 = prediction(model1, unlabeled, c_idxs)
    pred_2, conf_2 = prediction(model2, unlabeled, c_idxs)
    # Keep samples where both models agree and both are confident.
    agree = (pred_1 == pred_2)
    valid = (conf_1 > threshold) & agree & (conf_2 > threshold)
    expand_idxs = torch.tensor(c_idxs, device=valid.device)[valid]
    if len(expand_idxs) > max_cnt:
        # Harmonic mean of the two confidences; keep the max_cnt most confident.
        conf_f1 = 2 * conf_2 * conf_1 / (conf_2 + conf_1)
        top = conf_f1[valid].argsort()[-max_cnt:]
        expand_idxs = expand_idxs[top].tolist()
    else:
        expand_idxs = expand_idxs.tolist()
    return expand_idxs

def acc_P_R_F1(y_true, y_pred):
    """Score predictions: returns (accuracy, (precision, recall, f1, support)),
    the second element per-class as produced by sklearn."""
    y_pred_cpu = y_pred.cpu()  # move off GPU once, reuse for both metrics
    accuracy = accuracy_score(y_true, y_pred_cpu)
    prf_support = precision_recall_fscore_support(y_true, y_pred_cpu)
    return accuracy, prf_support

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate `model` on `data` against gold `label`.

    Args:
        idxs: subset of indices to evaluate; None = whole dataset.

    Returns:
        (accuracy, (precision, recall, f1, support)) via acc_P_R_F1.
    """
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        y_true = label
    else:
        y_true = label[idxs]
    return acc_P_R_F1(y_true, y_pred)

def WeakLabeling(model:RumorDetection, data, pseaudo_idxs=None, batch_size=20):
    """Weak-label `data` in place with `model`'s predictions.

    Overwrites `data.data_y[idx]` for every index not in `pseaudo_idxs` with a
    thresholded (pred > 0.5) 0/1 label vector.

    Args:
        pseaudo_idxs: indices to leave untouched; None = relabel everything.
            (Fix: was a shared mutable default `[]`.)

    Returns:
        (entrophy, preds, confs) where `entrophy` is full-length (zeros at the
        skipped indices) while `preds`/`confs` have one row per RELABELED
        sample only — callers must account for the different alignments.
    """
    if pseaudo_idxs is None:
        pseaudo_idxs = []
    c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    confs, preds = pred_tensor.sort(dim=1)
    weak_label = (pred_tensor > 0.5).long().tolist()
    for i, idx in enumerate(c_idxs):
        data.data_y[idx] = weak_label[i]
    entrophy = torch.zeros([len(data)], device=model.device)
    # -sum(p*log p): abs() flips the sign of log(p) for p < 1.
    # NOTE(review): assumes pred_tensor rows are probabilities — confirm.
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    return entrophy, preds[:, 1], confs[:, 1]

def obtain_model(tfidf_vec):
    """Build a BiGCN rumor detector: TF-IDF sentence encoder -> BiGCN
    propagation -> linear 2-way classifier."""
    sent_encoder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagator = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sent_encoder, propagator, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain splits from their cached
    file prefixes; returns them in that order."""
    splits = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        split = BiGCNTwitterSet()
        split.load_data_fast(data_prefix=prefix)
        splits.append(split)
    return tuple(splits)

def balancedTrainingIter(weak_set, batch_size, valid_idxs=None):
    """Yield class-balanced training batches from `weak_set`.

    The minority class is oversampled (whole-list repetition plus a random
    top-up) so every yielded batch holds batch_size//2 positives and
    batch_size//2 negatives.

    Args:
        weak_set: dataset with one-hot `data_y`, `__getitem__` and
            `collate_raw_batch`.
        batch_size: total batch size (half per class).
        valid_idxs: indices to draw from; None means the whole dataset
            (fix: None previously crashed in `torch.tensor(None)`).

    Raises:
        ValueError: if the selection contains only one class (previously an
            opaque ZeroDivisionError).
    """
    if valid_idxs is None:
        valid_idxs = list(range(len(weak_set)))
    labels = torch.tensor(weak_set.data_y).argmax(dim=1)[valid_idxs]
    valid_idxs = valid_idxs if isinstance(valid_idxs, torch.Tensor) else torch.tensor(valid_idxs)
    pos_idxs = valid_idxs[labels == 1].tolist()
    neg_idxs = valid_idxs[labels == 0].tolist()
    if not pos_idxs or not neg_idxs:
        raise ValueError("balancedTrainingIter needs at least one example of each class")
    if len(pos_idxs) > len(neg_idxs):
        max_size = len(pos_idxs)
        neg_idxs = neg_idxs * (len(pos_idxs) // len(neg_idxs)) + \
                        random.sample(neg_idxs, len(pos_idxs) % len(neg_idxs))
    else:
        max_size = len(neg_idxs)
        pos_idxs = pos_idxs * (len(neg_idxs) // len(pos_idxs)) + \
                        random.sample(pos_idxs, len(neg_idxs) % len(pos_idxs))
    half = batch_size // 2
    for i in range(0, max_size, half):
        training_idxs = pos_idxs[i:min(max_size, i + half)] + \
                            neg_idxs[i:min(max_size, i + half)]
        yield weak_set.collate_raw_batch([weak_set[j] for j in training_idxs])

def AugRDMLoss(model:RumorDetection, batch):
    """Apply one randomly chosen input augmentation, then compute the RDM
    loss and accuracy on the augmented predictions.

    Augmentations are drawn uniformly (20% each): gaussian noise, gaussian
    blur, adversarial perturbation, random masking, random replacement. For
    the adversarial case an RDM loss is backpropagated first so that
    `sent2vec.PreserveGrad()` can keep the input gradient used to craft the
    perturbation.

    Returns:
        (loss, acc) from `model.LossAndAcc` on `model.AugPredict(batch)`.
    """
    draw = random.random()
    if draw < 0.2:
        aug_type = "gaussian"
    elif draw < 0.4:
        aug_type = "g_blur"
    elif draw < 0.6:
        aug_type = "adver"
    elif draw < 0.8:
        aug_type = "rMask"
    else:
        aug_type = "rReplace"
    model.sent2vec.set_aug_type(aug_type)
    if aug_type == "adver":
        adv_loss, _ = model.RDMLoss(batch)
        adv_loss.backward()
        model.sent2vec.PreserveGrad()
    preds = model.AugPredict(batch)
    loss, acc = model.LossAndAcc(preds, batch[-2].to(model.device))
    return loss, acc

def ModelTrain(tr_model, weak_set, weak_set_label, max_iter, valid_indices, indices_probs,
               batch_size=32, learning_rate=5e-4, threshold=0.2):
    """Fine-tune `tr_model` on weighted samples drawn from `weak_set`.

    Samples `batch_size` indices per step (WeightedRandomSampler over
    `indices_probs`, without replacement), trains with the augmented RDM loss,
    and early-stops when the mean loss over one "epoch"
    (len(valid_indices)//batch_size steps) drops below `threshold`.

    Fix: the epoch length is clamped to >= 1, so fewer candidates than one
    batch no longer raises ZeroDivisionError in the `%` check.
    """
    print("train set perf:", acc_P_R_F1(weak_set_label[valid_indices],
                                        torch.tensor(weak_set.data_y).argmax(dim=1)[valid_indices]))
    loss_list = []
    optim = torch.optim.Adam([
        {'params': tr_model.parameters(), 'lr': learning_rate}
    ])
    steps_per_epoch = max(1, len(valid_indices) // batch_size)
    for step in range(max_iter):
        batch_idxs = [valid_indices[idx] for idx in WeightedRandomSampler(indices_probs,
                                                                          batch_size,
                                                                          replacement=False)]
        batch = weak_set.collate_raw_batch([weak_set[i] for i in batch_idxs])
        loss, acc = AugRDMLoss(tr_model, batch)
        optim.zero_grad()
        loss.backward()
        optim.step()
        torch.cuda.empty_cache()  # keep peak GPU memory down between steps
        print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
            step, max_iter, loss.data
        ))
        loss_list.append(loss.data.item())
        if (step + 1) % steps_per_epoch == 0:
            mean_loss = np.mean(loss_list)
            loss_list = []
            print("mean loss:", mean_loss)
            if mean_loss < threshold:  # early stop
                break

class MetaSelfTrainer(MetaEvaluatorV2):
    """Meta-learning based instance re-weighting for self-training.

    Inherits the meta-evaluation machinery (`Evaluate`, `weak_set_weights`
    bookkeeping) from MetaEvaluatorV2 and adds selection logging plus a
    `Training` driver that fine-tunes the model on the best-weighted subset.
    """

    def __init__(self, model: RumorDetection, weak_set, few_shot_set,
                weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, coeff4expandset=1.0, max_few_shot_size=20,
                batch_size=5):
        # Fix: exp_idxs previously defaulted to a shared mutable list ([]).
        if exp_idxs is None:
            exp_idxs = []
        super(MetaSelfTrainer, self).__init__(model, weak_set, few_shot_set,
                                               weak_set_label, exp_idxs, weak_set_weights,
                                               convey_fn, lr4model, coeff4expandset,
                                               max_few_shot_size, batch_size)
        self.expand_batch = []  # batches built from the expansion set (unused here)

    def weightsLR(self, grad_weights=None):
        """Set the instance-weight learning rate from the mean |gradient|.

        `grad_weights` is effectively required; the old None default crashed
        with AttributeError — now rejected explicitly.
        """
        if grad_weights is None:
            raise ValueError("weightsLR requires grad_weights")
        self.lr4weights = 0.05 / grad_weights.abs().mean()
        print("self.lr4weights : ", self.lr4weights)

    def LogSelectionInfo(self, e_arr, valid_idxs=None):
        """Print entropy and accuracy/PRF statistics for the selected subset
        versus the full weak set (diagnostics only, no state change)."""
        indices = torch.arange(len(self.weak_set))
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        pos_indices = valid_idxs if valid_idxs is not None else indices[self.weak_set_weights > 0.0]
        labels, preds = self.weak_set_label[indices], torch.tensor(self.weak_set.data_y)[
            indices].argmax(dim=1)
        print(len(indices))
        print(len(pos_indices))
        print(e_arr.mean(), e_arr[pos_indices].mean())
        print("####Selection Acc", accuracy_score(labels, preds), accuracy_score(labels[pos_indices], preds[pos_indices]))
        print("####init metric:", precision_recall_fscore_support(labels, preds))
        print("####valid metric:", precision_recall_fscore_support(labels[pos_indices], preds[pos_indices]))
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")

    def Training(self, entrophys, batch_size=32, max_meta_steps=50,
                    train_lr=5e-4, lr4weights=1.0, ratio=None, pseaudo_idxs=None):
        """Meta-evaluate to weight instances, select the training subset, and
        fine-tune the model on it.

        Args:
            entrophys: per-instance entropy of the weak labels (logging only).
            ratio: None -> train on every instance with weight > 0; otherwise
                on the top ceil(len(weak_set)*ratio) instances by weight.
            pseaudo_idxs: unused; kept for interface compatibility
                (fix: was a shared mutable default []).

        Returns:
            Tensor of the indices used for training.
        """
        if pseaudo_idxs is None:
            pseaudo_idxs = []
        self.Evaluate(weight_eta=lr4weights, max_meta_step=max_meta_steps, GSNR_topK=0.5)
        if ratio is None:
            indices = torch.arange(len(self.weak_set), device=self.device)
            valid_idxs = indices[self.weak_set_weights > 0]
        else:
            assert ratio > 0
            count = math.ceil(len(self.weak_set)*ratio)
            valid_idxs = self.weak_set_weights.argsort()[-count:]
        self.LogSelectionInfo(entrophys, valid_idxs=valid_idxs)
        self.batch_size = batch_size
        # Sampling probabilities over the selected subset, from softmaxed weights.
        probs = self.weak_set_weights.softmax(dim=0)[valid_idxs]
        ModelTrain(self.model, self.weak_set, self.weak_set_label, max_iter=5000, valid_indices=valid_idxs,
                        indices_probs=probs, batch_size=self.batch_size, learning_rate=train_lr, threshold=0.2)
        return valid_idxs


def test_model(model, test_set, test_label, test_suffix, step=0):
    """Evaluate `model` on a test split and log the metrics to fitlog.

    Logs both a best-metric entry (key "Original_<suffix>") and a stepped
    metric entry (key "<suffix>"); class-1 precision/recall/F1 are reported.
    """
    perf = Perf(model, test_set, test_label)
    acc_v, (p_v, r_v, f1_v, _) = perf
    print(f"Original Performance of {test_suffix}:", perf)
    metrics = {"valid_acc": acc_v, "valid_prec": p_v[1],
               "valid_recall": r_v[1], "valid_f1": f1_v[1],
               }
    fitlog.add_best_metric({f"Original_{test_suffix}": dict(metrics)})
    fitlog.add_metric({f"{test_suffix}": dict(metrics)}, step=step)

def SelfTrain():
    """Placeholder for the plain (non-meta) self-training baseline; not implemented."""
    pass

def MetaSelfTrain(tr_model, anno_model, weak_set, weak_set_label, p_idxs, e_idxs, tr_model_suffix, train_iter):
    """One co-training round: `anno_model` weak-labels `weak_set`, then
    `tr_model` is trained on a meta-selected subset and its metrics logged.

    Fixes vs. the original:
      * post-train evaluation and the pseudo-label statistics now use the
        `tr_model` / `weak_set` arguments instead of the module-level
        `model1` / `unlabeled_set` globals — the old code always scored
        model1, even on the round that trained model2;
      * the progress print used a "%3d | %3d" format string that was never
        applied; it now names the model being reported.

    NOTE(review): `few_shot_set` is still read from module scope — confirm
    this is intentional before reusing the function outside this script.

    Returns:
        (valid_idxs, p_idxs): meta-selected training indices and the
        (unmodified) pseudo-label index list.
    """
    entrophy, preds, logits = WeakLabeling(anno_model, weak_set, pseaudo_idxs=p_idxs + e_idxs)
    IR_weighting = MetaSelfTrainer(tr_model, weak_set, few_shot_set, weak_set_label,
                                   exp_idxs=e_idxs, convey_fn=None, lr4model=5e-4, batch_size=20)
    valid_idxs = IR_weighting.Training(entrophy, batch_size=32, max_meta_steps=50,
                                       train_lr=5e-4, lr4weights=1.0, ratio=0.3, pseaudo_idxs=[])

    rst_model = Perf(tr_model, weak_set, weak_set_label)
    acc_v, (p_v, r_v, f1_v, _) = rst_model
    print("Post-MetaTrain Performance of %s:" % tr_model_suffix, rst_model)
    pseaudo_labels = torch.tensor(weak_set.data_y).argmax(dim=1)
    acc_s, (p_s, r_s, f1_s, _) = acc_P_R_F1(weak_set_label[valid_idxs],
                                            pseaudo_labels[valid_idxs])
    fitlog.add_metric({f"{tr_model_suffix}":
                           {"valid_acc": acc_v, "valid_prec": p_v[1],
                            "valid_recall": r_v[1], "valid_f1": f1_v[1],
                            "selected_num": len(valid_idxs) * 1.0 / len(weak_set),
                            "selected_acc": acc_s, "selected_prec": p_s[1],
                            "selected_recall": r_s[1], "selected_f1": f1_s[1],
                            "init_entrophy": entrophy.mean(),
                            "selected_entrophy": entrophy[valid_idxs].mean()
                            }}, step=train_iter)

    return valid_idxs, p_idxs

def MetaSelfTrain_ET():
    """Placeholder: meta self-training variant "ET"; not implemented."""
    pass

def MetaSelfTrain_EF():
    """Placeholder: meta self-training variant "EF"; not implemented."""
    pass

# Pretrained BiGCN checkpoints for the first co-training peer; position i
# corresponds to the event selected by `domain_ID` below (suffix = saved acc).
BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.68.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
               ]

# Independently trained second checkpoint per event (the co-training peer).
BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.71.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
               ]

# Which event acts as the target domain, and the few-shot split size used in
# the cached data-file prefixes below.
domain_ID = 0
few_shot_cnt = 80

# Load the cached TF-IDF vocabulary if present; otherwise fit it over the
# lemmatised text of all three splits and cache it for later runs.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)
else:
    few_shot_set, old_domain, new_domain = obtain_Domain_set(f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                                            f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                                            f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}")
    lemma = Lemma_Factory()
    # One document per tweet text, lemmatised and re-joined, across all splits.
    corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                    for ID in data.data_ID for txt in data.data[ID]['text']]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)


# ---- Main co-training script ------------------------------------------------
model1 = obtain_model(tv)
model2 = obtain_model(tv)

# Derive the fitlog directory from this script's name.
# Fix: str.rstrip(".py") strips ANY trailing '.', 'p' or 'y' characters
# (e.g. "copy.py" -> "co"), so drop the extension with splitext instead.
log_dir = os.path.splitext(str(__file__))[0]
# log_dir = "MetaSelfTrain_0"
# Recreate the log directory from scratch (portable: no shell commands).
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

fitlog.set_log_dir("%s/" % log_dir, new_log=True)
few_shot_set, old_domain, new_domain = obtain_Domain_set(
                                            f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}"
                                        )

# Target-domain event name and its gold labels (argmax of the one-hot data_y).
new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']
new_domain_label = torch.tensor(new_domain.data_y).argmax(dim=1)

model1.load_model(BiGCN1_Paths[domain_ID])
model2.load_model(BiGCN2_Paths[domain_ID])

pseaudo_idxs = []
expand_set_idxs = []
unlabeled_set = new_domain

# test_model(model1, unlabeled_set, new_domain_label, f"model1_{new_domain_name}")
# test_model(model2, unlabeled_set, new_domain_label, f"model2_{new_domain_name}")

train_it = 0
print(f"few_shot_set/new_domain/old_domain = {len(few_shot_set)}/{len(new_domain)}/{len(old_domain)}")
# Co-training loop: the two models alternately weak-label for each other.
for it in range(60):
    train_it += 1
    MetaSelfTrain(model1, model2, unlabeled_set, new_domain_label, pseaudo_idxs,
                    expand_set_idxs, f"model1_{new_domain_name}", train_it)
    MetaSelfTrain(model2, model1, unlabeled_set, new_domain_label, pseaudo_idxs,
                    expand_set_idxs, f"model2_{new_domain_name}", train_it)