import sys
sys.path.append("..")
from Dataloader.dataloader_utils import shuffle_data
from Dataloader.twitterloader import TopicReader, TwitterSet
from SentModel.sentence_trainer import TopicTrainer, TopicGANTrainer, LMTrainer
from RumdetecFramework.AdverRumorFramework import TopicAdverRumorDetection
from SentModel.Sent2Vec import BertVec
from PropModel.SeqPropagation import GRUModel
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

def obtain_general_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test Twitter splits from their on-disk prefixes.

    Only the training split is loaded with ``min_len=5``; dev and test are
    loaded with the loader's defaults.

    Returns:
        (train, dev, test) tuple of TwitterSet instances.
    """
    splits = []
    for prefix, extra in ((tr_prefix, {"min_len": 5}),
                          (dev_prefix, {}),
                          (te_prefix, {})):
        subset = TwitterSet()
        subset.load_data_fast(data_prefix=prefix, **extra)
        splits.append(subset)
    return splits[0], splits[1], splits[2]

def DatasetReconstruct(tr, dev, te):
    """Re-cut the union of tr/dev/te into new LM train/dev/test splits.

    The three input splits are concatenated in (tr, dev, te) order, then new
    boundaries are taken from that combined sequence:

      * new train: the last ``len(dev) + 2 * len(te)`` samples,
      * new test:  the first ``int((len(tr) - len(te)) / 2)`` samples,
      * new dev:   the same number of samples immediately before new train.

    With these sizes the three new splits cover the union exactly (up to the
    integer rounding of the side size when ``len(tr) - len(te)`` is odd).

    Returns:
        (lm_tr, lm_dev, lm_te) tuple of freshly built TwitterSet instances.
    """
    bigDic = dict(dict(tr.data, **dev.data), **te.data)
    all_IDs, all_y, all_l = [], [], []
    for part in (tr, dev, te):
        all_IDs.extend(part.data_ID)
        all_y.extend(part.data_y)
        all_l.extend(part.data_len)

    n_train = len(dev) + len(te) + len(te)
    n_side = int((len(tr) - len(te)) / 2)

    def build(ids, ys, lens):
        # Assemble a TwitterSet holding just the given IDs, pulling the
        # payloads from the merged dictionary.
        subset = TwitterSet()
        subset.data = {ID: bigDic[ID] for ID in ids}
        subset.data_ID = ids
        subset.data_len = lens
        subset.data_y = ys
        return subset

    lm_tr = build(all_IDs[-n_train:], all_y[-n_train:], all_l[-n_train:])
    lm_dev = build(all_IDs[-(n_train + n_side):-n_train],
                   all_y[-(n_train + n_side):-n_train],
                   all_l[-(n_train + n_side):-n_train])
    lm_te = build(all_IDs[:n_side], all_y[:n_side], all_l[:n_side])
    return lm_tr, lm_dev, lm_te

def obtain_AdverBertRD():
    """Build the adversarial BERT rumor detector and its two trainers.

    All three returned objects share a single BertVec sentence encoder.

    Returns:
        (detector, topic_gan_trainer, lm_trainer) triple.
    """
    encoder = BertVec("../../bert_en/", bert_parallel=True)

    # LM trainer reuses the encoder's underlying BERT config (DataParallel
    # wraps the model, hence the .module indirection).
    lm_trainer = LMTrainer(encoder, encoder.bert.module.config)

    topic_gan = TopicGANTrainer(encoder, topic_label_num=5,
                                sent_hidden_size=768, grl=False)

    detector = TopicAdverRumorDetection(
        encoder,
        GRUModel(768, 256, 1, 0.2),
        nn.Linear(256, 2),
        topic_label_num=5,
    )
    # Share the GAN's topic classifier with the detector.
    detector.topic_cls = topic_gan.topic_cls
    return detector, topic_gan, lm_trainer

def pretrained_BertRD(pretrained_file, map_location=None):
    """Rebuild a TopicAdverRumorDetection model from a saved checkpoint.

    Args:
        pretrained_file: path to a checkpoint written by ``torch.save``,
            expected to contain 'sent2vec', 'prop_model' and 'rdm_cls'
            state dicts.
        map_location: optional device remapping forwarded to ``torch.load``
            (e.g. "cpu" to load a GPU-trained checkpoint on a CPU-only host).
            Defaults to None, which keeps the original loading behavior.

    Returns:
        A TopicAdverRumorDetection instance with the restored weights.
    """
    sent2vec = BertVec("../../bert_en/", bert_parallel=False)
    prop = GRUModel(768, 256, 1, 0.2)
    cls = nn.Linear(256, 2)
    # NOTE(review): torch.load unpickles arbitrary objects -- only load
    # checkpoints from trusted sources.
    ch = torch.load(pretrained_file, map_location=map_location)
    # Load the single-module weights first, then wrap BERT for multi-GPU use
    # (DataParallel changes the state-dict key prefix, so order matters here).
    sent2vec.load_state_dict(ch['sent2vec'])
    sent2vec.bert = nn.DataParallel(sent2vec.bert, device_ids=list(range(torch.cuda.device_count())))
    prop.load_state_dict(ch['prop_model'])
    cls.load_state_dict(ch['rdm_cls'])
    BertRD = TopicAdverRumorDetection(sent2vec, prop, cls, topic_label_num=5)
    return BertRD

def collate_sents(batch):
    """Flatten a batch of samples into one list of sentences.

    Each item in *batch* is a tuple whose first element is a list of
    sentences; any remaining fields of the tuple are dropped.
    """
    flat = []
    for item in batch:
        flat.extend(item[0])
    return flat


# bvec = BertVec("../../bert_en", bert_parallel=True)
# lvec = W2VLSTMVec("../saved/word2vec_cn/", 300, 1, None, False)
# tr_loader = DataLoader(tr, batch_size=8, shuffle=True, collate_fn=collate_sents)
# dev_loader = DataLoader(dev, batch_size=20, shuffle=False, collate_fn=collate_sents)
# te_loader = DataLoader(te, batch_size=20, shuffle=False, collate_fn=collate_sents)

# ch = torch.load("../saved/bert_cn_weibolm.pkl")
# bvec.load_state_dict(ch['sent2vec'])

# trainer = TopicTrainer(bvec, topic_label_num=5, sent_hidden_size=768)
# trainer = TopicGANTrainer(bvec, topic_label_num=5, sent_hidden_size=768, grl=False)
# trainer.AdversarialTrain(tr, dev, te,
#                          max_train_iters=1000, min_step=10, max_step=1,
#                          valid_every=100, learning_rate=2e-3,
#                          model_file="TopicGAN_BERT.pkl")

# Build the rumor detector, the topic-GAN trainer and the LM trainer; all
# three share one BertVec sentence encoder (see obtain_AdverBertRD).
model, topicGAN, lm_trainer = obtain_AdverBertRD()

# topic_tr = TopicReader("../data/TwitterTopic_tr.csv")
# topic_dev = TopicReader("../data/TwitterTopic_dev.csv")
# topic_te = TopicReader("../data/TwitterTopic_te.csv")
#
# topicGAN.TopicGANTrain(topic_tr, topic_dev, topic_te,
#                      max_train_iters=1000, min_step=10, max_step=1,
#                      valid_every=100, learning_rate=2e-3,
#                      model_file="TopicGAN_BERT.pkl")

# Fold index selecting which train/dev/test files are read from disk.
i = 1
tr, dev, te = obtain_general_set("../data/twitter_tr%d"%i, "../data/twitter_dev%d"%i, "../data/twitter_te%d"%i)
# Re-cut the three splits into the sets used for LM fine-tuning below.
lm_tr, lm_dev, lm_te = DatasetReconstruct(tr, dev, te)
print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
print("\n\n===========Train Settings, GPU:%d===========\n\n"%torch.cuda.device_count())
# NOTE(review): ResortSample(6) presumably re-orders/buckets the training
# samples -- confirm its semantics against TwitterSet before changing.
tr.ResortSample(6)
lm_tr.ResortSample(6)
# Best accuracies are threaded through every round so train_iters can keep
# the best checkpoint across the whole alternating schedule.
best_valid_acc, best_test_acc, best_valid_test_acc = 0.0, 0.0, 0.0
# Alternate for 10 rounds: one epoch of rumor-detection training, then two
# epochs of language-model training on the reconstructed splits.
for _ in range(10):
    best_valid_acc, best_test_acc, best_valid_test_acc = model.train_iters(
                                                        tr, dev, te, max_epochs=1,
                                                        log_dir="../logs/", log_suffix="_PT_RDM_1",
                                                        best_valid_acc=best_valid_acc,
                                                        best_test_acc=best_test_acc,
                                                        best_valid_test_acc=best_valid_test_acc,
                                                        model_file="PT_RDM_1_%s.pkl"% (te.data[te.data_ID[0]]['event'])
    )
    tr_loader = DataLoader(lm_tr, batch_size=4, shuffle=True, collate_fn=collate_sents)
    dev_loader = DataLoader(lm_dev, batch_size=4, shuffle=False, collate_fn=collate_sents)
    te_loader = DataLoader(lm_te, batch_size=4, shuffle=False, collate_fn=collate_sents)
    # NOTE(review): the model_file says "cn_weibo" although this run loads the
    # English BERT/Twitter data -- verify the intended checkpoint name.
    lm_trainer.LMTrain(tr_loader, dev_loader, te_loader, max_epoch=2, learning_rate=2e-4, model_file="bert_cn_weibolm.pkl")

