import os
import shutil
import sys

# Make the project packages importable before any local import.
sys.path.append("..")
sys.path.append("../..")

import torch.nn as nn
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

from RumdetecFramework.BaseRumorFramework import RumorDetection
from Dataloader.twitterloader import CAMI_TwitterSet
from Dataloader.dataloader_utils import *
from SentModel.Sent2Vec import Para2Vec
from PropModel.SeqPropagation import CAMICNN


# Optional CLI argument selects the visible GPU(s), e.g. `python train.py 0`.
# `> 1` (rather than `!= 1`) also guards the embedded-interpreter case where
# sys.argv is empty, which would make sys.argv[1] raise IndexError.
if len(sys.argv) > 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def obtain_CAMI(model_file):
    """Assemble a CAMI rumor-detection model.

    Pipeline: a Para2Vec sentence encoder loaded from `model_file`,
    a CAMI CNN propagation module, and a linear 2-class head, wrapped
    in the framework's RumorDetection trainer.
    """
    sent_encoder = Para2Vec(model_file)
    propagation = CAMICNN(56, dropout_prob=0.8, k_max1=10, k_max2=5)
    classifier = nn.Linear(propagation.prop_hidden_size, 2)
    detector = RumorDetection(sent_encoder, propagation, classifier,
                              batch_size=20, grad_accum_cnt=1)
    return detector

def obtain_Sort_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test twitter datasets from their file prefixes.

    Training sequences shorter than 5 are dropped at load time; dev and
    test sets are loaded without filtering.
    """
    def _load(prefix, **load_kwargs):
        dataset = CAMI_TwitterSet()
        dataset.load_data_fast(data_prefix=prefix, **load_kwargs)
        return dataset

    return _load(tr_prefix, min_len=5), _load(dev_prefix), _load(te_prefix)

class Para2VecTrainer:
    """Incrementally trains a gensim Doc2Vec model over one or more corpora,
    persisting the model to disk between training rounds."""

    def __init__(self, model_file):
        # Path the Doc2Vec model is loaded from / saved to.
        self.model_file = model_file

    def get_dataset(self, filename, cate):
        """Read a corpus file into a list of TaggedDocument.

        Each line has the form "<doc_id>\t<space-separated tokens>".
        Documents are tagged "<cate>:<doc_id>" so tags stay unique
        across corpus files.
        """
        with open(filename, 'r') as f:
            print("Reading " + filename + " .......")
            docs = f.readlines()
        x_train = []
        for doc in docs:
            # Split document id from content. maxsplit=1 tolerates stray
            # tabs inside the text (the original unbounded split raised
            # ValueError on such lines). `doc_id` avoids shadowing the
            # builtin `id`.
            doc_id, text = doc.split("\t", 1)
            word_list = text.split(' ')
            # Drop the trailing newline from the last token.
            word_list[-1] = word_list[-1].strip()
            document = TaggedDocument(word_list, tags=[str(cate) + ':' + str(doc_id)])
            x_train.append(document)
        return x_train

    def IncrementTrain(self, train_data):
        """Continue training the saved model on `train_data`, then persist it."""
        self.model = Doc2Vec.load(self.model_file)
        # NOTE(review): total_examples = previous corpus count + this batch;
        # presumably intended to keep learning-rate decay consistent across
        # incremental rounds — confirm against gensim's train() contract.
        tte = self.model.corpus_count + len(train_data)
        self.model.train(train_data, total_examples=tte, epochs=70)
        self.model.save(self.model_file)

    def TrainIters(self, corpus_file_list, vector_size, window=5,
                   dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1,
                   dv=None, dv_mapfile=None, comment=None, trim_rule=None):
        """For each corpus file: load (or initialize) the model, then train.

        The position of each file in `corpus_file_list` is used as the
        category index that namespaces document tags.
        """
        for cate, corpus_file in enumerate(corpus_file_list):
            x_train = self.get_dataset(corpus_file, cate)
            if os.path.exists(self.model_file):
                self.model = Doc2Vec.load(self.model_file)
                print(corpus_file + " is ready for training!!!")
            else:
                # First corpus bootstraps a fresh Doc2Vec model.
                print(corpus_file + " is initating training-model!!!")
                self.model = Doc2Vec(x_train, vector_size=vector_size, window=window,
                                     dm_mean=dm_mean, dm=dm, dbow_words=dbow_words,
                                     dm_concat=dm_concat, dm_tag_count=dm_tag_count,
                                     dv=dv, dv_mapfile=dv_mapfile, comment=comment,
                                     trim_rule=trim_rule)
                self.model.save(self.model_file)
            self.IncrementTrain(x_train)

# Train / refresh the Para2Vec sentence embeddings on the PHEME corpus.
sent2vec_file = "../../saved/para2vec.twitter"
p_trainer = Para2VecTrainer(sent2vec_file)
p_trainer.TrainIters(
    ['PhEME.txt'],
    vector_size=56,
    window=5,
    dm_mean=None,
    dm=1,
    dbow_words=0,
    dm_concat=0,
    dm_tag_count=1,
    dv=None,
    dv_mapfile=None,
    comment=None,
    trim_rule=None,
)


# Per-script log directory named after this file with its extension removed.
# splitext strips exactly the ".py" suffix; rstrip(".py") would strip any
# trailing run of '.', 'p', 'y' characters (e.g. "copy.py" -> "co").
log_dir = os.path.splitext(str(__file__))[0]
if os.path.exists(log_dir):
    # Start each run from a clean log directory.
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

# Repeat the whole experiment 10 times. Split index 1 is used for the
# Sort-vs-General (shuffled) comparison; the per-event runs then cover
# all 5 leave-one-event-out splits.
for t in range(10):
    i = 1
    # --- CAMI trained on event-sorted data (split 1) ---
    tr, dev, te = obtain_Sort_set("../../data/twitter_tr%d" % i,
                                  "../../data/twitter_dev%d" % i,
                                  "../../data/twitter_te%d" % i)
    tr, dev, te = Sort_data(tr, dev, te)
    tr.filter_short_seq(min_len=5)
    tr.trim_long_seq(10)
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
        te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
    model = obtain_CAMI(sent2vec_file)
    model.train_iters(tr, dev, te, max_epochs=10,
                      log_dir=log_dir, log_suffix="Sort",
                      model_file="../../saved/CAMI_Sort_twitter.pkl")

    # --- CAMI trained on shuffled data (split 1) ---
    tr, dev, te = obtain_Sort_set("../../data/twitter_tr%d" % i,
                                  "../../data/twitter_dev%d" % i,
                                  "../../data/twitter_te%d" % i)
    tr, dev, te = shuffle_data(tr, dev, te)
    tr.filter_short_seq(min_len=5)
    tr.trim_long_seq(10)
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
        te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
    # Fix: obtain_CAMI requires the sentence-embedding model file; the
    # original bare call `obtain_CAMI()` raised TypeError.
    model = obtain_CAMI(sent2vec_file)
    model.train_iters(tr, dev, te, max_epochs=10,
                      log_dir=log_dir, log_suffix="General",
                      model_file="../../saved/CAMI_General_twitter.pkl")

    # --- leave-one-event-out training over all 5 splits ---
    for i in range(5):
        tr, dev, te = obtain_Sort_set("../../data/twitter_tr%d" % i,
                                      "../../data/twitter_dev%d" % i,
                                      "../../data/twitter_te%d" % i)
        tr.filter_short_seq(min_len=5)
        tr.trim_long_seq(10)
        test_event_name = te.data[te.data_ID[0]]['event']
        print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
            test_event_name, len(dev), len(te), len(tr)))
        print("\n\n===========%s Train===========\n\n" % test_event_name)
        # Fix: pass the required model file (original call had no argument).
        model = obtain_CAMI(sent2vec_file)
        model.train_iters(tr, dev, te, max_epochs=100,
                          log_dir=log_dir, log_suffix=test_event_name,
                          model_file="../../saved/CAMI_%s.pkl" % test_event_name)