import os
import pickle
import random
import shutil
import sys
from typing import Tuple

# The sys.path hacks must run before the project-local imports below.
sys.path.append("../../")
sys.path.append("../")

import fitlog
import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

from Dataloader.dataloader_utils import Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import InstanceReweightingV3


def pred_Logits(model: "RumorDetection", data, idxs=None, batch_size=20):
    """Run *model* over *data* in mini-batches and stack the raw logits.

    Args:
        model: rumor-detection model exposing ``forward(batch)``.
            (Annotation is a forward-reference string so this module
            does not require ``RumorDetection`` at definition time.)
        data: dataset supporting ``__len__``, ``__getitem__`` and
            ``collate_raw_batch``.
        idxs: optional subset of dataset indices to score; defaults to
            the whole dataset.
        batch_size: number of instances per forward pass.

    Returns:
        Tensor with one logit row per requested instance, in ``idxs``
        order.
    """
    # NOTE(review): the model is scored in whatever train/eval mode it is
    # currently in; callers evaluating mid-training may want model.eval().
    if idxs is None:
        idxs = list(range(len(data)))
    preds = []
    with torch.no_grad():
        for start in range(0, len(idxs), batch_size):
            # Slicing clamps at the sequence end, so the original
            # min(len(idxs), start + batch_size) bound was redundant.
            batch_idxs = idxs[start:start + batch_size]
            batch = data.collate_raw_batch([data[i] for i in batch_idxs])
            preds.append(model.forward(batch))
    return torch.cat(preds)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted class index, winning logit) for each instance.

    BUG FIX / generalization: the original took column 1 of a full
    ascending sort, which picks the argmax only when there are exactly
    two classes. ``max(dim=1)`` is equivalent in the binary case and
    correct for any number of classes (and avoids shadowing ``idxs``).
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    vals, preds = pred_tensor.max(dim=1)
    return preds, vals

def acc_P_R_F1(y_true, y_pred):
    """Return accuracy plus per-class (precision, recall, F1, support).

    ``y_pred`` is a torch tensor and is moved to CPU before being handed
    to sklearn; ``y_true`` is passed through unchanged.
    """
    y_pred_cpu = y_pred.cpu()
    accuracy = accuracy_score(y_true, y_pred_cpu)
    prf_support = precision_recall_fscore_support(y_true, y_pred_cpu)
    return accuracy, prf_support

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate *model* on *data* against *label*.

    When ``idxs`` is given, both predictions and labels are restricted
    to that subset. Returns the (accuracy, P/R/F1/support) pair from
    ``acc_P_R_F1``.
    """
    predicted, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, predicted)

def obtain_model(tfidf_vec) -> BiGCNRumorDetec:
    """Assemble a BiGCN rumor detector on top of a fitted TF-IDF vectorizer.

    Pipeline: TF-IDF based sentence embedding (300-d, GloVe directory)
    -> BiGCN propagation (300 -> 256) -> linear classifier (1024 -> 2).
    """
    sent_encoder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True, grad_preserve=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sent_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain datasets.

    Each prefix is passed to ``BiGCNTwitterSet.load_data_fast``; the
    three loaded sets are returned in the same order as the arguments.
    """
    def _load(prefix):
        # One dataset per prefix, loaded via the fast path.
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        return dataset

    return _load(fs_prefix), _load(od_prefix), _load(nd_prefix)

def Convert_2_BiGCNFormat(data) -> BiGCNTwitterSet:
    """Wrap *data*'s core fields in a fresh ``BiGCNTwitterSet``.

    The underlying storage is shared (not copied) with the source
    dataset; only the container type changes.
    """
    converted = BiGCNTwitterSet()
    for field in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, field, getattr(data, field))
    return converted

class WindTrainer(InstanceReweightingV3):
    """Instance-reweighting trainer.

    Each training batch gets per-instance weights that are refined by a
    meta step against a few-shot validation set (``MetaStep``), then the
    model is updated with the sigmoid-squashed weights (``OptimStep``).
    Metrics are logged through fitlog.
    """

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(WindTrainer, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size,
                                                      Inner_BatchSize)
        self.log_dir = log_dir
        # Side effect: initializes fitlog's logging directory.
        fitlog.set_log_dir(log_dir, new_log=True)
        self.suffix = suffix
        self.weight_eta = weight_eta  # step size for instance-weight updates
        self.best_valid_acc = 0.0
        self.meta_step = meta_step  # inner meta iterations per batch

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor, weight_mask, meta_step=5):
        """Refine the per-instance weights of *batch* and return them.

        Each iteration: take one weighted SGD step, measure the gradient
        on the few-shot validation set, restore the model to its initial
        parameters, and move the weights against the weight-gradient.
        ``weight_mask`` multiplies the update, so entries with mask 0
        are never changed.

        NOTE(review): the ``meta_step`` parameter is unused — the loop
        length is ``self.meta_step``; it is kept for call compatibility.
        """
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        # Snapshot parameters so every meta iteration starts from the
        # same model state.
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        for step in range(self.meta_step):
            u = weight.sigmoid()
            model.zero_grad()
            loss = self.LossList(model, batch)
            sumLoss = (u * loss).sum()
            sumLoss.backward()
            optim.step()
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            # BUG FIX: report the actual loop bound (self.meta_step); the
            # original printed the unused ``meta_step`` argument, whose
            # default (5) disagrees with self.meta_step when callers omit it.
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}")
            model.load_state_dict(initStateDicts)  # roll the model back
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            w_grads = u_grads*u*(1-u)  # chain rule through the sigmoid
            weightGrads = -1 * (w_grads / w_grads.norm(2))  # normalized descent direction
            print("uGrads:", u_grads)
            print("wGrads:", w_grads)
            print("weightGrads:", weightGrads)
            update = self.weight_eta * weightGrads
            weight = weight - update*(weight_mask.to(update.device))
        return weight

    def OptimStep(self, model, model_optim, batch, weight):
        """One optimizer step on the weight-scaled sum of instance losses."""
        # NOTE(review): no zero_grad() here — assumes gradients were
        # cleared upstream (e.g. inside ComputeGrads4Weights); confirm.
        loss = self.LossList(model, batch)
        sumLoss = ((weight.sigmoid()) * loss).sum()
        sumLoss.backward()
        model_optim.step()

    def dataIter(self, OOD_Set, InD_Set=None, batch_size=32):
        """Yield shuffled mixed batches over the OOD and in-domain sets.

        Yields ``(collated_batch, global_indices, weight_mask)`` where
        the mask is 1.0 for OOD instances and 0.0 for in-domain ones.
        Global indices < len(OOD_Set) address OOD_Set; the remainder
        address InD_Set (offset by len(OOD_Set)).
        """
        p_idxs = list(range(len(OOD_Set)))
        p_len = len(p_idxs)
        if InD_Set is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(InD_Set)))
            l_len = len(l_idxs)
        data_size = p_len + l_len
        # The permutation is doubled so the final slice may run past
        # data_size and still yield a full batch (repeating early items).
        idxs = random.sample(range(data_size), data_size)*2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[(start_i):(start_i+batch_size)]
            items = [OOD_Set[p_idxs[idx]] if idx < p_len else \
                        InD_Set[l_idxs[idx-p_len]] for idx in batch_idxs]
            yield OOD_Set.collate_raw_batch(items), batch_idxs, \
                    torch.tensor([1. if idx < p_len else 0. for idx in batch_idxs])

    def Training(self, model:RumorDetection, train_set:BiGCNTwitterSet, valid_set:BiGCNTwitterSet,
                 test_set:BiGCNTwitterSet, indomain_set:BiGCNTwitterSet=None, max_epoch=100, max_valid_every=100,
                 model_file="./tmp.pkl"):
        """Full training loop.

        Alternates MetaStep (weight refinement) and OptimStep (model
        update) over mixed batches, validates on *test_set* every
        ``max_valid_every`` steps, and saves the model at the end.
        *valid_set* supplies the few-shot meta-validation data.
        """
        # Separate optimizers: SGD drives the throw-away inner meta step,
        # Adam performs the real model updates.
        meta_optim = torch.optim.SGD([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        model_optim = torch.optim.Adam([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        # One scalar weight per instance: OOD start at 0.0 (sigmoid 0.5),
        # in-domain at 10.0 (sigmoid ~1.0, and frozen by dataIter's mask).
        weights = [0.0]*len(train_set) + \
                    ([] if indomain_set is None else [10.0]*len(indomain_set))
        # NOTE(review): self.device / self.batch_size / self.lr4model are
        # presumably set by InstanceReweightingV3.__init__ — confirm there.
        self.train_set_weights = torch.tensor(weights, device=self.device)
        test_label = torch.tensor(test_set.data_y).argmax(dim=1)
        step = 0
        for epoch in range(max_epoch):
            for batch, indices, weight_mask in self.dataIter(train_set, indomain_set,self.batch_size):
                weights = self.train_set_weights[indices]
                new_weights = self.MetaStep(model, meta_optim, batch, weights, weight_mask)
                self.train_set_weights[indices] = new_weights
                self.OptimStep(model, model_optim, batch, new_weights)
                if (step+1) % max_valid_every == 0:
                    self.valid(model, test_set, test_label, self.suffix, step)
                step += 1
        model.save_model(model_file)

    def valid(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on the test set and log accuracy/P/R/F1 per class via fitlog."""
        rst_model = Perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        print("step = {} : ".format(step), rst_model)
        output_items = [("valid_acc", acc_v)] + \
                       [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_metric({f"{test_suffix}": dict(output_items)}, step=step)
        # fitlog tracks the best value per metric across calls.
        fitlog.add_best_metric({f"FinalPerf_{self.suffix}": dict(output_items)})

if __name__ == "__main__":
    # Saved BiGCN checkpoints (per event, suffixed with their accuracy);
    # kept for reference — this script trains from scratch, not from these.
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.72.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                   ]

    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.76.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                   ]

    # Fresh fitlog directory named after this script.
    # BUG FIX: str.rstrip(".py") strips any trailing '.', 'p', 'y'
    # *characters* (e.g. "copy.py" -> "co"), not the extension;
    # os.path.splitext drops the suffix correctly.
    logDir = os.path.splitext(str(__file__))[0]
    # logDir = "OnlineTest"
    if os.path.exists(logDir):
        # Portable replacement for os.system("rm -rf ..."/"mkdir ...").
        shutil.rmtree(logDir)
    os.makedirs(logDir)

    fewShotCnt = 100
    domainID = 4
    fewShotSet, oldDomain, newDomain = obtain_Domain_set(
                                                f"../../data/twitter_fs{domainID}_{fewShotCnt}",
                                                f"../../data/twitter_od{domainID}_{fewShotCnt}",
                                                f"../../data/twitter_nd{domainID}_{fewShotCnt}"
                                            )
    # Reload a cached TF-IDF vectorizer, or fit one over the lemmatized
    # text of all three datasets and cache it.
    TfIdf_twitter_file = "../../saved/TfIdf_twitter.pkl"
    if os.path.exists(TfIdf_twitter_file):
        with open(TfIdf_twitter_file, "rb") as fr:
            # Pickle of a locally written cache file (trusted input).
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        corpus = [" ".join(lemma(txt)) for data in [fewShotSet, oldDomain, newDomain]
                                        for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(TfIdf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

    model1 = obtain_model(tv)
    newDomainName = newDomain.data[newDomain.data_ID[0]]['event']
    print(f"old/new/few = {len(oldDomain)}/{len(newDomain)}/{len(fewShotSet)}")
    trainer = WindTrainer(class_num=2, log_dir=logDir, suffix=f"{newDomainName}_FS{fewShotCnt}",
                          weight_eta=0.1, lr4model=5e-4, max_few_shot_size=20, Inner_BatchSize=20, meta_step=10)
    trainer.Training(model1, oldDomain, fewShotSet, newDomain, max_epoch=50, max_valid_every=30,
                     model_file=f"Wind_{newDomainName}_FS{fewShotCnt}.pkl")