import sys
sys.path.append("../../")
sys.path.append("../")

import copy
import os
import pickle
import random
import shutil
from typing import Tuple

import fitlog
import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score,precision_recall_fscore_support

from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import InstanceReweighting
from RumdetecFramework.InstanceReweighting import update_current_devices, to_var

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run *model* over *data* in mini-batches and return the stacked logits.

    idxs: optional subset of sample indices to score; defaults to all samples.
    Returns a single tensor of shape (len(idxs), n_classes) built under
    torch.no_grad(), so no autograd graph is kept.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():
        for start in range(0, len(idxs), batch_size):
            chunk_idxs = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in chunk_idxs])
            chunks.append(model.forward(raw_batch))
    return torch.cat(chunks)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted_label, score) per sample.

    Uses an argmax over the class dimension, so it is correct for any number
    of classes.  The previous sort-based variant (`sort(dim=1)` then column 1)
    only produced the argmax for exactly 2-class output; for the binary case
    the results are identical.
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    # torch.max(dim=1) returns (values, indices); indices are the labels.
    vals, labels = pred_tensor.max(dim=1)
    return labels, vals

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, per-class (precision, recall, F1, support) arrays).

    y_pred is a (possibly GPU) tensor; it is moved to CPU once for sklearn.
    """
    y_pred_cpu = y_pred.cpu()
    acc = accuracy_score(y_true, y_pred_cpu)
    prf = precision_recall_fscore_support(y_true, y_pred_cpu)
    return acc, prf

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Score *model* on *data*; returns (accuracy, (P, R, F1, support)).

    When *idxs* is given, only that subset of samples (and the matching
    slice of *label*) is evaluated.
    """
    predicted, _scores = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, predicted)

def obtain_model(tfidf_vec) -> BiGCNRumorDetec:
    """Assemble a BiGCN rumor detector on top of a fitted tf-idf vectorizer.

    Pipeline: tf-idf sentence encoder (300-d, trainable embeddings) ->
    BiGCN propagation (300 -> 256) -> linear 2-way classifier head.
    """
    sentence_encoder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sentence_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix) -> Tuple[TwitterSet, TwitterSet, TwitterSet]:
    """Load the few-shot, old-domain, and new-domain TwitterSets.

    Each prefix is handed to TwitterSet.load_data_fast.  Fix: the ``Tuple``
    annotation previously raised NameError at import time because
    ``typing.Tuple`` was never imported (now added at the top of the file).
    """
    def _load(prefix):
        # One loader per domain; load_data_fast reads the pre-built files.
        ds = TwitterSet()
        ds.load_data_fast(data_prefix=prefix)
        return ds

    return _load(fs_prefix), _load(od_prefix), _load(nd_prefix)

def Convert_2_BiGCNFormat(data) -> BiGCNTwitterSet:
    """Re-wrap *data* as a BiGCNTwitterSet by sharing its core fields.

    This is a shallow copy: the new set aliases the same underlying
    data / data_ID / data_len / data_y objects.
    """
    converted = BiGCNTwitterSet()
    for field in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, field, getattr(data, field))
    return converted

def BiGCNEvaluater(data_set:BiGCNTwitterSet, label):
    """Build a closure that scores a detector on a fixed evaluation set.

    The returned callable yields (accuracy, precision, recall, F1) where
    the last three are taken for class index 1 (the positive class).
    """
    def evaluater(model:BiGCNRumorDetec):
        accuracy, per_class = Perf(model, data_set, label)
        precision_pos = per_class[0][1]
        recall_pos = per_class[1][1]
        f1_pos = per_class[2][1]
        return accuracy, precision_pos, recall_pos, f1_pos
    return evaluater

class OnlineTrainer(InstanceReweighting):
    """Meta-reweighting trainer: each batch of weakly-labelled instances is
    re-weighted by the negated gradient of the few-shot loss w.r.t. the
    per-instance weights before the real model update.

    NOTE(review): relies on InstanceReweighting internals not visible in
    this file (InnerBatch, ComputeGrads4Weights, InnerLoss, model_optim,
    weak_set, few_shot_set, device) — confirm their contracts there.
    """
    def __init__(self, model: RumorDetection, train_set, few_shot_set,
                weak_set_label, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0,
                batch_size=5):
        # Pure pass-through to InstanceReweighting; no extra state is added.
        super(OnlineTrainer, self).__init__(model, train_set, few_shot_set,
                                             weak_set_label, weak_set_weights=weak_set_weights, convey_fn=convey_fn,
                                             lr4model=lr4model, scale_lr4model=scale_lr4model, coeff4expandset=coeff4expandset,
                                             batch_size=batch_size)


    def Training(self, max_epoch=100,
                 evaluater=None,
                 model_file="./tmp.pkl"):
        """Run the online reweighted training loop.

        max_epoch: number of passes over the weak set.
        evaluater: optional callable(model) -> (acc, prec, rec, f1); when
            given, the model is checkpointed to *model_file* whenever the
            accuracy improves on the best seen so far.
        model_file: path handed to model.save_model for the best checkpoint.
        """
        best_valid_acc = 0.0
        # A throwaway copy of the model serves as the probe for the
        # weight-gradient computation so the real parameters stay untouched;
        # with more than one GPU the probe lives on cuda:1.
        if torch.cuda.device_count()== 1 :
            tmp_model = copy.deepcopy(self.model)
        else:
            tmp_model = copy.deepcopy(self.model).to(torch.device("cuda:1"))
            update_current_devices(tmp_model, torch.device("cuda:1"))
        # The whole few-shot set is collated once into a single batch and
        # reused at every step as the meta-objective.
        few_shot_data = self.few_shot_set.collate_raw_batch(
            [self.few_shot_set[i] for i in range(len(self.few_shot_set))]
        )
        for epoch in range(max_epoch):
            # Doubling the shuffled index list lets the final slice wrap
            # past the end, so every batch holds exactly batch_size items.
            shuffled_indices = random.sample(list(range(len(self.weak_set))),
                                             len(self.weak_set)) * 2
            for step in range(0, len(self.weak_set), self.batch_size):
                # Sync the probe with the current model parameters.
                tmp_model.load_state_dict(self.model.state_dict())
                indices = shuffled_indices[step:step + self.batch_size]
                batch, indices = self.InnerBatch(indices)
                # Instance weights start at zero; their gradient w.r.t. the
                # few-shot loss determines the final weighting.
                weights = to_var(torch.zeros(self.batch_size)).to(tmp_model.device)
                grad_weights = self.ComputeGrads4Weights(step, batch, weights, tmp_model, few_shot_data)
                print("grad:", grad_weights.norm())
                print("grad_weights:", grad_weights)
                # Keep only instances whose up-weighting would lower the
                # few-shot loss (negative gradient), then L1-normalise.
                w_tilde = torch.clamp(-grad_weights, min=0)
                print("w_tilde:", w_tilde)
                norm_c = torch.sum(w_tilde)
                w = w_tilde.data / norm_c.data if norm_c != 0 else w_tilde
                w = w.data.to(self.device)
                # Real update: weighted per-instance loss on the actual model.
                loss = self.InnerLoss(batch, self.model)
                cost = torch.sum(loss * w)
                self.model.zero_grad()
                self.model_optim.zero_grad()
                cost.backward()
                self.model_optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d [%3d, %3d]) ####, loss = %6.8f' % (
                    step, len(self.weak_set), epoch, max_epoch, loss.data.mean()
                ))
            if evaluater is not None:
                val_acc, prec, rec, f1 = evaluater(self.model)
                print(
                    '##### %6d | %6d, val_acc/val_prec/val_rec/val_f1 = %6.8f/%6.7f/%6.7f/%6.7f, best_valid_acc = %6.7f' % (
                        epoch, max_epoch,
                        val_acc, prec, rec, f1,
                        best_valid_acc
                    )
                )
                # Checkpoint on a strictly better validation accuracy.
                if val_acc > best_valid_acc:
                    best_valid_acc = val_acc
                    self.model.save_model(model_file)

# Two runs of pre-trained BiGCN checkpoints, indexed by domain_ID in the
# order: charliehebdo, ferguson, germanwings-crash, ottawashooting,
# sydneysiege.  NOTE(review): the numeric suffix presumably records the
# checkpoint's accuracy — confirm against the training scripts.
BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.68.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
               ]

BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.71.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
               ]


# Per-script log directory: the script's own path minus its extension.
# Fix: str.rstrip(".py") strips ANY trailing '.', 'p' or 'y' characters
# (e.g. "copy.py" -> "co"); os.path.splitext removes exactly the extension.
log_dir = os.path.splitext(str(__file__))[0]
# log_dir = "MetaSelfTrain_0"
# Start from an empty directory: drop any stale logs from earlier runs.
# Fix: use shutil/os instead of shell strings via os.system (portable,
# no shell-injection surface from the path).
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

# Experiment configuration: 100 few-shot samples, target event selected by
# domain_ID (index into the event order documented above the checkpoint
# path lists).
few_shot_cnt = 100
fitlog.set_log_dir("%s/" % log_dir, new_log=True)
domain_ID = 4
# Load the few-shot (fs), old-domain (od) and new-domain (nd) splits for
# the chosen target event.
few_shot_set, old_domain, new_domain = obtain_Domain_set(
                                            f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}"
                                        )
# Reuse a cached tf-idf vectorizer when present; otherwise fit one on the
# lemmatised text of all three splits and cache it.
# NOTE(review): unpickling executes arbitrary code — only safe because the
# cache file is produced locally by this script.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)
else:
    # i = 1
    # few_shot_set, old_domain, new_domain = obtain_Domain_set(f"../../data/twitter_fs{i}_{few_shot_cnt}",
    #                                                         f"../../data/twitter_od{i}_{few_shot_cnt}",
    #                                                         f"../../data/twitter_nd{i}_{few_shot_cnt}")
    lemma = Lemma_Factory()
    corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                    for ID in data.data_ID for txt in data.data[ID]['text']]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

# Build the detector; the model starts from random weights (the pre-trained
# checkpoint load below is commented out).
model1 = obtain_model(tv)
new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']
# model1.load_model(BiGCN1_Paths[domain_ID])

# Online reweighted training: old-domain data is the weak set, the few-shot
# split is the meta set, and the new domain is held out for evaluation.
trainer = OnlineTrainer(model1, Convert_2_BiGCNFormat(old_domain), Convert_2_BiGCNFormat(few_shot_set),
                old_domain.labelTensor, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0,
                batch_size=32)
evaluater = BiGCNEvaluater(Convert_2_BiGCNFormat(new_domain), new_domain.labelTensor)
trainer.Training(max_epoch=20,
                 evaluater=evaluater,
                 model_file="./tmp.pkl")



