import sys
sys.path.append("../../")
sys.path.append("../")

import os
import pickle
import random
import shutil

import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from tqdm import trange
import fitlog

from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import MetaEvaluator, WeightedAcc

def Features(data, model:RumorDetection):
    """Encode every sample of ``data`` into a feature vector via the model.

    The model is put into eval mode for the (gradient-free) forward passes
    and switched back to train mode afterwards.

    :return: tensor of shape (len(data), feature_dim).
    """
    chunk = 20
    model.eval()
    pieces = []
    with torch.no_grad():
        for start in trange(0, len(data), chunk):
            stop = min(start + chunk, len(data))
            raw_batch = data.collate_raw_batch([data[k] for k in range(start, stop)])
            pieces.append(model.Batch2Vecs(raw_batch))
    model.train()
    return torch.cat(pieces, dim=0)

def FeatureMatch(new_domain, old_domain, model:RumorDetection, convey_fn=None):
    """Rank old-domain samples by how similar they are to the new domain.

    Each old-domain sample "votes" once for every new-domain sample whose
    cosine similarity with it exceeds the 75th percentile of all pairwise
    similarities.  Returns old-domain indices sorted by vote count
    (ascending), so the best-matching samples come last.
    """
    if convey_fn is None:
        unlabel_set, label_set = new_domain, old_domain
    else:
        unlabel_set = convey_fn(new_domain)
        label_set = convey_fn(old_domain)

    unlabeled_feats = Features(unlabel_set, model)
    labeled_feats = Features(label_set, model)
    # cosine similarity matrix: (num_old, num_new)
    sims = labeled_feats.matmul(unlabeled_feats.T)
    norms = labeled_feats.norm(2, dim=1).unsqueeze(1).matmul(
        unlabeled_feats.norm(2, dim=1).unsqueeze(0))
    sims = sims / norms

    # 75th-percentile similarity over all pairs serves as the vote threshold.
    flat_sorted, _ = sims.reshape([-1]).sort()
    threshold = flat_sorted[int(flat_sorted.numel() * 0.75)]
    vote_counts = (sims > threshold).int().sum(dim=1)
    _, ranked = vote_counts.sort()
    return ranked

def obtain_model(tfidf_vec):
    """Assemble a fresh TF-IDF + BiGCN rumor detector.

    :param tfidf_vec: fitted sklearn TfidfVectorizer used by the sentence encoder.
    :return: an (untrained) BiGCNRumorDetec instance.
    """
    sent_encoder = TFIDFBasedVec(
        tfidf_vec,
        20,
        embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagator = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sent_encoder, propagator, classifier,
                           batch_size=20, grad_accum_cnt=1)

def WeakLabeling(model:RumorDetection, data, batch_size=20):
    """Pseudo-label ``data`` in place with ``model``'s predictions.

    Runs the model over ``data`` without gradients, overwrites ``data.data_y``
    with binarized (``> 0.5``) predictions, and returns the per-sample
    prediction entropy.

    NOTE(review): the model is not switched to eval() here, so dropout (if
    any) stays active during labeling — confirm this is intentional.

    :return: 1-D tensor of entropies, one per sample.
    """
    data_loader = DataLoader(data,
                             batch_size=batch_size,
                             shuffle=False,
                             collate_fn=data.collate_raw_batch)
    preds = []
    with torch.no_grad():
        for batch in data_loader:
            preds.append(model.forward(batch))
    # Concatenate once and reuse (the original built this tensor twice).
    pred_tensor = torch.cat(preds)
    data.data_y = (pred_tensor > 0.5).long().tolist()
    # print("===> weak label logits:", pred_tensor.max(dim=1)[0].tolist())
    # H = -sum(p * log p); |log p| == -log p assuming rows are probabilities
    # in (0, 1] — TODO confirm the model outputs probabilities, not logits.
    entropy = (pred_tensor.log().abs() * pred_tensor).sum(dim=1)
    return entropy

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain TwitterSet splits.

    :return: tuple (few_shot_set, old_domain_set, new_domain_set).
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        split = TwitterSet()
        split.load_data_fast(data_prefix=prefix)
        loaded.append(split)
    return tuple(loaded)

def Convert_2_BiGCNFormat(data):
    """Re-wrap a dataset as a BiGCNTwitterSet, sharing the underlying storage.

    Only references are copied, so mutations of the returned set's containers
    are visible through the original and vice versa.
    """
    converted = BiGCNTwitterSet()
    for attr in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, attr, getattr(data, attr))
    return converted

#=============================Expand Few Shot Set======================================
def NewFewShotSet(model:RumorDetection, expand_cnt):
    """Build an expanded few-shot set from the best-matching old-domain samples.

    Relies on the module-level globals ``new_domain``, ``old_domain`` and
    ``few_shot_set``.  The ``expand_cnt`` old-domain samples whose features
    are most similar to the new domain are prepended to a copy of the
    few-shot set.  Returns None when ``expand_cnt <= 0``.
    """
    if expand_cnt <= 0:
        return None
    ranked = FeatureMatch(new_domain, old_domain, model, convey_fn=Convert_2_BiGCNFormat)
    chosen = ranked[-expand_cnt:].tolist()
    expanded = few_shot_set.__class__()
    expanded.data_ID = [old_domain.data_ID[i] for i in chosen]
    expanded.data_y = [old_domain.data_y[i] for i in chosen]
    expanded.data_len = [old_domain.data_len[i] for i in chosen]
    selected = {ID: old_domain.data[ID] for ID in expanded.data_ID}
    expanded.data_ID += few_shot_set.data_ID
    expanded.data_len += few_shot_set.data_len
    expanded.data_y += few_shot_set.data_y
    expanded.data = dict(few_shot_set.data, **selected)
    return expanded
#======================================================================================

class MetaSelfTrainer(MetaEvaluator):
    """Self-training driver combining meta instance re-weighting with
    class-balanced fine-tuning.

    The inherited ``Evaluate`` step learns per-sample weights for the weakly
    labeled set against the few-shot set; ``Training`` then fine-tunes the
    wrapped model on the positively-weighted (and class-balanced) samples.
    """

    def __init__(self, model: RumorDetection, weak_set, few_shot_set,
                weak_set_label, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0,
                batch_size=5, expand_cnt=100):
        # The re-weighting machinery (weak_set_weights, model_optim,
        # InnerLoss, Evaluate, ...) lives in the MetaEvaluator base class.
        super(MetaSelfTrainer, self).__init__(model, weak_set, few_shot_set,
                                               weak_set_label, weak_set_weights,
                                               convey_fn, lr4model, scale_lr4model, coeff4expandset,
                                               batch_size)
        # Optional expansion of the few-shot set with matched old-domain
        # samples; None when expand_cnt <= 0 (relies on module globals).
        self.expand_set = NewFewShotSet(model, expand_cnt=expand_cnt)

    def Training(self, max_epoch=1, batch_size=32):
        """Meta-estimate sample weights, then fine-tune the model on the
        positively-weighted weak samples, oversampling the minority class.
        """
        # Temporarily raise the meta learning rates for the weight-estimation
        # pass, then restore the configured values.
        tmp = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = 1e-1, 2e-2
        self.Evaluate(max_epochs=1, max_meta_steps=20, lr4weights=0.1)  # 0.1 for ferguson, 0.05 for sydney
        self.lr4model, self.scale_lr4model = tmp[0], tmp[1]
        indices = torch.arange(self.weak_set_size)  # [self.weak_set_weights.__gt__(0.0)]
        # ---- diagnostics: weak-label accuracy, unweighted vs. meta-weighted ----
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        true_labels = torch.tensor(self.weak_set_label)[indices].argmax(dim=1)
        weak_labels = torch.tensor(self.weak_set.data_y)[indices].argmax(dim=1)
        print(len(indices))
        print(self.weak_set_weights.__gt__(0.0).int().sum())
        print(WeightedAcc(true_labels,
                          weak_labels,
                          torch.ones([len(indices)]))
              )
        print(WeightedAcc(true_labels,
                          weak_labels,
                          self.weak_set_weights[indices])
              )
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")
        # Keep only samples that received a positive meta weight.
        valid_indices = indices[self.weak_set_weights.__gt__(0.0)]
        # *************************balance the training data***************************#
        # If the surviving pseudo labels are skewed beyond roughly 2:1,
        # oversample the minority class by duplicating its indices.
        valid_labels = weak_labels[valid_indices]
        ratio = valid_labels.sum()*1.0/len(valid_indices)
        if ratio > 0.67:
            neg_indices = valid_indices[valid_labels.__lt__(1)].tolist()
            insert_times = round( (len(valid_labels) - len(neg_indices))*1.0/len(neg_indices) ) - 1
            valid_indices = valid_indices.tolist() + neg_indices*insert_times
        elif ratio < 0.33:
            pos_indices = valid_indices[valid_labels.__gt__(0)].tolist()
            insert_times = round( (len(valid_labels) - len(pos_indices))*1.0/len(pos_indices) ) - 1
            valid_indices = valid_indices.tolist() + pos_indices*insert_times
        else:
            valid_indices = valid_indices.tolist()
        #******************************************************************************#
        self.batch_size = batch_size
        for epoch in range(max_epoch):
            # Doubling the shuffled list guarantees the slice below never runs
            # short when len(valid_indices) is not a multiple of batch_size.
            shuffled_indices = random.sample(valid_indices, len(valid_indices))*2
            sum_loss = 0.
            for start in range(0, len(valid_indices), self.batch_size):
                batch_idxs = shuffled_indices[start:start + self.batch_size]
                batch = self.weak_set.collate_raw_batch(
                    [self.weak_set[idx] for idx in batch_idxs]
                )
                loss = self.InnerLoss(batch, self.model)
                cost = torch.mean(loss)
                self.model.zero_grad()
                self.model_optim.zero_grad()
                cost.backward()
                self.model_optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
                    start, len(self.weak_set), loss.data.mean()
                ))
                sum_loss += cost.data
            mean_loss = (sum_loss*1.0)/((len(valid_indices)//self.batch_size)+1)
            print("mean loss:", mean_loss)
            if mean_loss <0.2: # early stop
                break

# Pretrained BiGCN checkpoints, one per PHEME event, indexed by the split
# variable ``i`` below (the filename suffix is presumably the saved
# validation accuracy — confirm against the training scripts).
BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.74.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.71.pkl"
               ]

# A second, independently saved set of checkpoints for the same events,
# used as the co-training partner (model2).
BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.72.pkl"
               ]

# Build the shared TF-IDF vectorizer, or load it from the pickle cache.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)  # pickle is acceptable here: we wrote this file ourselves
else:
    # NOTE(review): the corpus is built from split 1 here, while the
    # experiment below runs on split i = 3 — confirm the TF-IDF vocabulary
    # is meant to be split-independent.
    i = 1
    few_shot_set, old_domain, new_domain = obtain_Domain_set("../../data/twitter_fs%d" % i,
                                                            "../../data/twitter_od%d" % i,
                                                            "../../data/twitter_nd%d" % i)
    lemma = Lemma_Factory()
    # One document per tweet text, lemmatized and re-joined for the vectorizer.
    corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                    for ID in data.data_ID for txt in data.data[ID]['text']]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

# Per-script fitlog directory (<script name without ".py">), recreated fresh
# on every run.
# Bug fix: str.rstrip(".py") strips a *character set*, mangling script names
# ending in '.', 'p' or 'y' (e.g. "copy.py" -> "co"); os.path.splitext removes
# exactly the extension.  os.makedirs/shutil.rmtree replace the non-portable
# os.system("mkdir ..." / "rm -rf ...") shell calls.
log_dir = os.path.splitext(str(__file__))[0]
# log_dir = "MetaSelfTrain_0"
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

# Two identically-configured detectors for the co-training loop below.
model1 = obtain_model(tv)
model2 = obtain_model(tv)

few_shot_cnt = 70
fitlog.set_log_dir("%s/" % log_dir, new_log=True)

# Split index: 0..4 correspond to the five events listed in *_Paths above.
i = 3
# old_domain, new_domain = obtain_Domain_set("../../data/twitter_tr%d"%i,
#                                            "../../data/twitter_dev%d"%i,
#                                            "../../data/twitter_te%d"%i)
few_shot_set, old_domain, new_domain = obtain_Domain_set(
                                           "../../data/twitter_fs%d"%i,
                                           "../../data/twitter_od%d"%i,
                                           "../../data/twitter_nd%d"%i)

# Event name and gold labels of the target (new) domain.
new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']
new_domain_label = new_domain.data_y

# Baseline (pre-self-training) performance of model1 on the new domain.
model1.load_model(BiGCN1_Paths[i])
Bi_new_domain = Convert_2_BiGCNFormat(new_domain)
Bi_new_domain_loader = DataLoader(Bi_new_domain, batch_size=20, shuffle=False,
                               collate_fn=Bi_new_domain.collate_raw_batch)
rst_model1 = model1.valid(Bi_new_domain_loader, all_metrics=True)
print("Original Performance of model1_%s:"%new_domain_name, rst_model1)
# rst_model* layout appears to be (acc, loss, precision, recall, f1) — judged
# from the metric keys below; confirm against RumorDetection.valid.
fitlog.add_best_metric({"BiGCN_Original_%s"%new_domain_name:
                                        {"valid_acc": rst_model1[0],
                                         "valid_loss": rst_model1[1],
                                         "valid_prec": rst_model1[2],
                                         "valid_recall": rst_model1[3],
                                         "valid_f1": rst_model1[4]
                                           }})
fitlog.add_metric({"model1_%s" % new_domain_name:
                       {"valid_acc": rst_model1[0],
                        "valid_loss": rst_model1[1],
                        "valid_prec": rst_model1[2],
                        "valid_recall": rst_model1[3],
                        "valid_f1": rst_model1[4]
                        }}, step=0)


# Baseline performance of the second detector on the same evaluation loader.
model2.load_model(BiGCN2_Paths[i])
# BURvNN_new_domain = Convert_2_BURvNNFormat(new_domain)
# BR_new_domain_loader = DataLoader(BURvNN_new_domain, batch_size=20, shuffle=False,
#                                collate_fn=BURvNN_new_domain.collate_raw_batch)
rst_model2 = model2.valid(Bi_new_domain_loader, all_metrics=True)
print("Original Performance of model2_%s:"%new_domain_name, rst_model2)
# NOTE(review): the metric key says "BuRvNN" but model2 is loaded from a
# BiGCN checkpoint — looks like a leftover from an earlier BU-RvNN setup;
# confirm before comparing runs by key.
fitlog.add_best_metric({"BuRvNN_model_Original_%s"%new_domain_name:
                                    {"valid_acc": rst_model2[0],
                                    "valid_loss": rst_model2[1],
                                    "valid_prec": rst_model2[2],
                                    "valid_recall": rst_model2[3],
                                    "valid_f1": rst_model2[4]
                                    }})
fitlog.add_metric({"model2_%s" % new_domain_name:
                       {"valid_acc": rst_model2[0],
                        "valid_loss": rst_model2[1],
                        "valid_prec": rst_model2[2],
                        "valid_recall": rst_model2[3],
                        "valid_f1": rst_model2[4]
                        }}, step=0)
#============================ co-training loop =========================================
# Two detectors teach each other for 40 rounds: one model weak-labels the
# unlabeled new-domain set, the other is meta-trained on those pseudo labels,
# then the roles are swapped.  Metrics are logged to fitlog after each half.
unlabeled_set = Convert_2_BiGCNFormat(new_domain)
for it in range(40):
    # ---- first half: model2 produces pseudo labels, model1 is trained ----
    e_arr = WeakLabeling(model2, unlabeled_set)  # per-sample prediction entropy
    IR_weighting = MetaSelfTrainer(model1, unlabeled_set, Convert_2_BiGCNFormat(few_shot_set),
                                   new_domain_label, convey_fn=None, lr4model=5e-2,
                                    scale_lr4model=4e-2, batch_size=20, expand_cnt=0)
    IR_weighting.Training(max_epoch=10, batch_size=32)
    print(">>>>>>>>>>>>Entrophy Increase>>>>>>>>>>>>>>>>>")
    print(e_arr.mean(), e_arr[IR_weighting.weak_set_weights.__gt__(0.0)].mean())
    print(">>>>>>>>>>>>Entrophy Increase>>>>>>>>>>>>>>>>>")
    # model1.load_model("./%s/MetaBiGCN_%s.pkl"%(log_dir, new_domain_name))
    rst_model1 = model1.valid(Bi_new_domain_loader, all_metrics=True)
    # bug fix: the "%3d | %3d" placeholders were never filled in
    print("%3d | %3d Post-MetaTrain Performance of model1:" % (it + 1, 40), rst_model1)
    valid_indices = IR_weighting.weak_set_weights.__gt__(0.0)  # samples kept by meta re-weighting
    labels = torch.tensor(IR_weighting.weak_set_label).argmax(dim=1)          # gold labels
    pseudo_labels = torch.tensor(IR_weighting.weak_set.data_y).argmax(dim=1)  # model2's weak labels
    fitlog.add_metric({"model1_%s"%new_domain_name:
                        {"valid_acc": rst_model1[0],
                         "valid_loss": rst_model1[1],
                         "valid_prec": rst_model1[2],
                         "valid_recall": rst_model1[3],
                         "valid_f1": rst_model1[4],
                         "selected_num": valid_indices.int().sum()*1.0/len(IR_weighting.weak_set_weights),
                         "selected_acc": accuracy_score(labels[valid_indices], pseudo_labels[valid_indices]),
                         "init_entrophy": e_arr.mean(),
                         "selected_entrophy": e_arr[valid_indices].mean()
                           }},  step=it+1)

    # ---- second half: model1 produces pseudo labels, model2 is trained ----
    # NOTE(review): scale_lr4model is 4e-3 here vs 4e-2 above — confirm the
    # asymmetry between the two models is intentional.
    e_arr = WeakLabeling(model1, unlabeled_set)
    IR_weighting = MetaSelfTrainer(model2, unlabeled_set, Convert_2_BiGCNFormat(few_shot_set),
                                   new_domain_label, convey_fn=None, lr4model=5e-2,
                                    scale_lr4model=4e-3, batch_size=20, expand_cnt=0)
    IR_weighting.Training(max_epoch=10, batch_size=32)
    print(">>>>>>>>>>>>Entrophy Increase>>>>>>>>>>>>>>>>>")
    print(e_arr.mean(), e_arr[IR_weighting.weak_set_weights.__gt__(0.0)].mean())
    print(">>>>>>>>>>>>Entrophy Increase>>>>>>>>>>>>>>>>>")
    rst_model2 = model2.valid(Bi_new_domain_loader, all_metrics=True)
    # bug fix: originally printed rst_model1 under the model2 banner, with
    # the format placeholders left unfilled
    print("%3d | %3d Post-MetaTrain Performance of model2:" % (it + 1, 40), rst_model2)
    valid_indices = IR_weighting.weak_set_weights.__gt__(0.0)
    labels = torch.tensor(IR_weighting.weak_set_label).argmax(dim=1)
    pseudo_labels = torch.tensor(IR_weighting.weak_set.data_y).argmax(dim=1)
    fitlog.add_metric({"model2_%s"%new_domain_name:
                        {"valid_acc": rst_model2[0],
                         "valid_loss": rst_model2[1],
                         "valid_prec": rst_model2[2],
                         "valid_recall": rst_model2[3],
                         "valid_f1": rst_model2[4],
                         "selected_num": valid_indices.int().sum()*1.0/len(IR_weighting.weak_set_weights),
                         "selected_acc": accuracy_score(labels[valid_indices], pseudo_labels[valid_indices]),
                         "init_entrophy": e_arr.mean(),
                         "selected_entrophy": e_arr[valid_indices].mean()
                           }},  step=it+1)