import fitlog
import torch
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from sklearn.metrics import accuracy_score
import os
import sys
from torch.utils.data import DataLoader
from .BaseRumorFramework import RumorDetection
from .AdverRumorFramework import TopicAdverRumorDetection


class TheseusRumorDetection(RumorDetection):
    """Rumor detector fine-tuned "Theseus"-style: only the sentence encoder
    (``self.sent2vec``) is handed to the optimizer, so the propagation model
    and classifier inherited from ``RumorDetection`` receive no updates here.
    """

    def __init__(self, sent2vec, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(TheseusRumorDetection, self).__init__(sent2vec, propagation, classifier, batch_size, grad_accum_cnt)

    def TheseusTrain(self, train_set, dev_set, test_set,
                    valid_every=100, max_epochs=10,
                    log_dir="../logs/", log_suffix="_RumorDetection", model_file=""):
        """Train with gradient accumulation and keep the best checkpoint.

        Every ``valid_every`` accumulated steps the model is evaluated on
        ``dev_set`` and ``test_set``; the checkpoint with the highest
        validation accuracy is written via ``self.save_model(model_file)``.

        Args:
            train_set, dev_set, test_set: datasets exposing ``collate_raw_batch``.
            valid_every: validation frequency, in accumulated optimizer steps.
            max_epochs: number of passes over the training data.
            log_dir: directory for fitlog output.
            log_suffix: key under which best metrics are recorded in fitlog.
            model_file: path handed to ``self.save_model`` on improvement.
        """
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        # Evaluation does not accumulate gradients, so it can use the full
        # effective batch size in one go.
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        # Only the sentence encoder is optimized; lr is divided by
        # grad_accum_cnt to compensate for gradients summed over micro-batches.
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 5e-5 * 1.0 / self.grad_accum_cnt}
        ])

        best_valid_acc, best_test_acc, best_valid_test_acc = 0.0, 0.0, 0.0
        counter = 0  # fitlog step counter
        optim.zero_grad()
        self.train()
        sum_loss, sum_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                loss, acc = self.RDMLoss(batch[0], batch[2].to(self.device))
                loss.backward()
                torch.cuda.empty_cache()
                # BUGFIX: was `step % grad_accum_cnt == 0`, which stepped the
                # optimizer on the very first micro-batch and was off-by-one
                # w.r.t. the logging condition below.
                if (step + 1) % self.grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                # BUGFIX: accumulate python floats (`.item()`), not the loss
                # tensor — keeping the tensor retains every micro-batch's
                # autograd graph and leaks GPU memory.
                sum_loss += loss.item()
                sum_acc += acc
                if (step + 1) % self.grad_accum_cnt == 0:
                    print('%6d | %6d  [%3d | %3d], loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                        step, len(train_loader),
                        epoch, max_epochs,
                        sum_loss / self.grad_accum_cnt, sum_acc / self.grad_accum_cnt,
                        best_valid_acc
                    ))
                    fitlog.add_metric(
                        {"train": {"acc": sum_acc / self.grad_accum_cnt, "loss": sum_loss / self.grad_accum_cnt}},
                        step=counter
                    )
                    sum_loss, sum_acc = 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()  # self.valid presumably switches to eval mode
                    best_test_acc = max(best_test_acc, test_acc)
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        # BUGFIX: "best_valid_acc" previously recorded best_valid_test_acc.
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                    "best_valid_test_acc": best_valid_test_acc,
                                                    "best_test_acc": best_test_acc}})
        fitlog.finish()

class LogitsTheseusRumorDetection(RumorDetection):
    """Theseus-style logit distillation: the student's predictions are
    regressed (MSE) onto the logits produced by a fixed teacher model, while
    accuracy is still measured against the gold labels.
    """

    def __init__(self, sent2vec, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(LogitsTheseusRumorDetection, self).__init__(sent2vec, propagation, classifier, batch_size, grad_accum_cnt)

    def LogitsRDMLoss(self, batch, logits):
        """Return (MSE distillation loss against teacher ``logits``, accuracy).

        NOTE(review): labels are taken from ``batch[-2]`` here while the
        sibling trainers use ``batch[2]`` — confirm the collate layout.
        """
        seqs, labels = batch[0], batch[-2]
        preds = self.forward(seqs)
        loss = F.mse_loss(preds, logits)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def TheseusTrain(self, train_set, dev_set, test_set, teacherRD,
                    valid_every=100, max_epochs=10,
                    log_dir="../logs/", log_suffix="_RumorDetection", model_file=""):
        """Distill ``teacherRD``'s logits into this model with gradient
        accumulation; the checkpoint with the best validation accuracy is
        saved to ``model_file``.
        """
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        # Only the student's sentence encoder is optimized.
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 5e-5 * 1.0 / self.grad_accum_cnt}
        ])

        best_valid_acc, best_test_acc, best_valid_test_acc = 0.0, 0.0, 0.0
        counter = 0  # fitlog step counter
        optim.zero_grad()
        self.train()
        sum_loss, sum_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                # BUGFIX: the teacher is frozen (its parameters are not in the
                # optimizer), so run it under no_grad to avoid building and
                # retaining its autograd graph.
                with torch.no_grad():
                    teacher_logits = teacherRD(batch[0])
                loss, acc = self.LogitsRDMLoss(batch, teacher_logits)
                loss.backward()
                torch.cuda.empty_cache()
                # BUGFIX: update after a full accumulation window, not at step 0.
                if (step + 1) % self.grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                # BUGFIX: accumulate floats so each loss graph can be freed.
                sum_loss += loss.item()
                sum_acc += acc
                if (step + 1) % self.grad_accum_cnt == 0:
                    print('%6d | %6d  [%3d | %3d], loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                        step, len(train_loader),
                        epoch, max_epochs,
                        sum_loss / self.grad_accum_cnt, sum_acc / self.grad_accum_cnt,
                        best_valid_acc
                    ))
                    fitlog.add_metric(
                        {"train": {"acc": sum_acc / self.grad_accum_cnt, "loss": sum_loss / self.grad_accum_cnt}},
                        step=counter
                    )
                    sum_loss, sum_acc = 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()  # self.valid presumably switches to eval mode
                    best_test_acc = max(best_test_acc, test_acc)
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        # Consistency with the sibling trainers: record the best metrics and
        # close the fitlog session (previously missing here).
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                    "best_valid_test_acc": best_valid_test_acc,
                                                    "best_test_acc": best_test_acc}})
        fitlog.finish()

class SentDistillModel(RumorDetection):
    """Distills a teacher sentence encoder (``t_sentvec``) into the student
    encoder (``self.sent2vec``) while jointly training rumor classification.

    ``distill_level="sent"``  (internal level 1): match pooled sentence vectors.
    ``distill_level="token"`` (internal level 2): match per-token hidden states.
    """

    def __init__(self, sent2vec, propagation, classifier, t_sentvec, distill_level="sent"):
        super(SentDistillModel, self).__init__(sent2vec, propagation, classifier)
        self.t_sentvec = t_sentvec  # frozen teacher encoder (used under no_grad)
        if distill_level == "sent":
            self.distill_level = 1
        elif distill_level == "token":
            self.distill_level = 2
        else:
            print("Error: Did not assign the distill level type.")
            sys.exit(0)

    def TokensLevelDistill(self, sents):
        """Token-level distillation.

        Returns a pooled sentence vector from the student plus the masked MSE
        between student and teacher token hidden states.
        """
        ipt_ids, attn_masks = self.sent2vec.sents2ids(sents)
        with torch.no_grad():
            t_hiddens, _ = self.t_sentvec.tokens2vecs(ipt_ids, attn_masks)
        val_masks = attn_masks.unsqueeze(-1)  # zero out padding positions
        s_hiddens, _ = self.sent2vec.tokens2vecs(ipt_ids, attn_masks)
        distill_loss = torch.pow((s_hiddens - t_hiddens) * val_masks, 2).mean()
        # Sentence vector: first-token hidden state plus max-pool of the rest.
        sent_vec = s_hiddens[:, 0, :] + s_hiddens[:, 1:, :].max(dim=1)[0]
        return sent_vec, distill_loss

    def SentenceLevelDistill(self, sents):
        """Sentence-level distillation: MSE between pooled sentence vectors."""
        with torch.no_grad():
            t_vecs = self.t_sentvec(sents)
        s_vecs = self.sent2vec(sents)
        distill_loss = torch.pow((t_vecs - s_vecs), 2).mean()
        return s_vecs, distill_loss

    def DistillLoss(self, seqs, labels):
        """Return (weighted total loss, distill loss, classification loss, acc).

        The total loss mixes classification and distillation as 0.9/0.1.
        """
        sents, seq_len = self.seq2sents(seqs)
        if self.distill_level == 1:
            sent_vecs, distill_loss = self.SentenceLevelDistill(sents)
        elif self.distill_level == 2:
            sent_vecs, distill_loss = self.TokensLevelDistill(sents)
        else:
            print("Error: Invalid Distill Level Type! Distill_Level Value:", self.distill_level)
            sys.exit(0)
        # Re-group the flat sentence-vector batch into per-sequence tensors.
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs = self.prop_model(seq_tensors)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        final_loss = 0.9 * loss + 0.1 * distill_loss
        return final_loss, distill_loss, loss, acc

    def train_iters(self, train_set, dev_set, test_set,
                    valid_every=100, max_epochs=10, model_file="",
                    log_suffix="_RumorDetection"):
        """Jointly train classification + distillation with gradient
        accumulation; keep the checkpoint with the best validation accuracy.

        ``log_suffix`` was added as a defaulted parameter: the final
        ``fitlog.add_best_metric`` call previously referenced an undefined
        name and raised NameError when training completed.
        """
        batch_size = 5
        grad_accum_cnt = 4
        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=batch_size * grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=batch_size * grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        # Student encoder gets the main learning rate; the downstream modules
        # are nearly frozen (tiny lr).
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5 * 1.0 / grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-7 * 1.0 / grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-7 * 1.0 / grad_accum_cnt}
        ])

        best_valid_acc, best_test_acc, best_valid_test_acc = 0.0, 0.0, 0.0
        counter = 0  # fitlog step counter
        optim.zero_grad()
        self.train()
        sum_loss1, sum_loss2, sum_loss3, sum_acc = 0.0, 0.0, 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                final_loss, distill_loss, loss, acc = self.DistillLoss(batch[0], batch[2].to(self.device))
                final_loss.backward()
                torch.cuda.empty_cache()
                # BUGFIX: step after a full accumulation window, not at step 0.
                if (step + 1) % grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                # BUGFIX: accumulate python floats, not tensors, so the
                # autograd graphs of past micro-batches can be freed.
                sum_loss1 += final_loss.item()
                sum_loss2 += distill_loss.item()
                sum_loss3 += loss.item()
                sum_acc += acc
                if (step + 1) % grad_accum_cnt == 0:
                    print(
                        '%6d | %6d  [%3d | %3d], Loss=%6.7f, distill_loss=%6.7f, rdm_loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            sum_loss1 / grad_accum_cnt, sum_loss2 / grad_accum_cnt,
                            sum_loss3 / grad_accum_cnt, sum_acc / grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    # BUGFIX: use the local grad_accum_cnt here — the rest of
                    # this method uses the hard-coded local, not
                    # self.grad_accum_cnt.
                    fitlog.add_metric(
                        {"train": {"acc": sum_acc / grad_accum_cnt, "loss": sum_loss1 / grad_accum_cnt}},
                        step=counter
                    )
                    sum_loss1, sum_loss2, sum_loss3, sum_acc = 0.0, 0.0, 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()  # self.valid presumably switches to eval mode
                    best_test_acc = max(best_test_acc, test_acc)
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        # BUGFIX: "best_valid_acc" previously recorded best_valid_test_acc.
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                    "best_valid_test_acc": best_valid_test_acc,
                                                    "best_test_acc": best_test_acc}})
        fitlog.finish()

    def save_model(self, model_file):
        """Save the sentence encoder to a sibling ``*_sent.pkl`` file and the
        propagation model + classifier state dicts to ``model_file``.
        """
        # BUGFIX: rstrip(".pkl") strips any of the characters '.', 'p', 'k',
        # 'l' from the end (e.g. "check.pkl" -> "chec"); strip the exact
        # suffix instead.
        base = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        sent_model_file = "%s_sent.pkl" % base
        self.sent2vec.save_model(sent_model_file)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore the encoder, propagation model and classifier saved by
        ``save_model``; exit with an error message if ``model_file`` is absent.
        """
        if os.path.exists(model_file):
            # BUGFIX: same exact-suffix stripping as in save_model (rstrip
            # removed a character set, not the ".pkl" suffix).
            base = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
            sent_model_file = "%s_sent.pkl" % base
            self.sent2vec.load_model(sent_model_file)
            checkpoint = torch.load(model_file)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class TheseusTopicAdverRumorDetection(TopicAdverRumorDetection):
    """Theseus fine-tuning of the sentence encoder under the topic-adversarial
    objective provided by ``TopicAdverRumorDetection.Adver_Loss``.
    """

    def __init__(self, sent2vec, propagation, classifier, topic_label_num):
        super(TheseusTopicAdverRumorDetection, self).__init__(sent2vec, propagation, classifier, topic_label_num)

    def TheseusTrain(self, train_set, dev_set, test_set,
                     valid_every=100, max_epochs=10,
                     log_dir="../logs/", log_suffix="_RumorDetection", model_file=""):
        """Adversarial Theseus training with gradient accumulation; keeps the
        checkpoint with the best validation accuracy.

        Args:
            train_set, dev_set, test_set: datasets exposing ``collate_raw_batch``.
            valid_every: validation frequency, in accumulated optimizer steps.
            max_epochs: number of passes over the training data.
            log_dir: directory for fitlog output.
            log_suffix: key under which best metrics are recorded in fitlog.
            model_file: path handed to ``self.save_model`` on improvement.
        """
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        batch_size = 5
        grad_accum_cnt = 4
        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=batch_size * grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=batch_size * grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        # Only the sentence encoder is optimized; lr compensates for the
        # gradients summed over grad_accum_cnt micro-batches.
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 5e-7 * 1.0 / grad_accum_cnt}
        ])
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        counter = 0  # fitlog step counter (incremented every micro-batch here)
        optim.zero_grad()
        self.train()
        sum_adver_loss = 0.0
        sum_loss, sum_topic_loss = 0.0, 0.0
        sum_acc, sum_topic_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                adver_loss, loss, acc, topic_loss, topic_acc = self.Adver_Loss(batch[0], batch[2].to(self.device), batch[3].to(self.device))
                adver_loss.backward()
                # BUGFIX: step after a full accumulation window, not at step 0
                # (also consistent with the sibling Theseus trainers).
                if (step + 1) % grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                torch.cuda.empty_cache()
                # BUGFIX: accumulate plain numbers so past autograd graphs are
                # freed. float() handles both 0-dim tensors and floats; the
                # exact return types of Adver_Loss are not visible here.
                sum_adver_loss += adver_loss.item()
                sum_loss += float(loss)
                sum_acc += acc
                sum_topic_loss += float(topic_loss)
                sum_topic_acc += topic_acc
                if (step + 1) % grad_accum_cnt == 0:
                    print(
                        '%6d | %6d  [%3d | %3d], adver_loss=%6.7f, loss/acc = %6.8f/%6.7f, topic_loss/topic_acc:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            sum_adver_loss / grad_accum_cnt, sum_loss / grad_accum_cnt, sum_acc / grad_accum_cnt,
                            sum_topic_loss / grad_accum_cnt, sum_topic_acc / grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    sum_adver_loss = 0.0
                    sum_loss, sum_topic_loss = 0.0, 0.0
                    sum_acc, sum_topic_acc = 0.0, 0.0

                # BUGFIX: log a number, not the raw loss tensor.
                fitlog.add_metric(
                    {"train": {"acc": acc, "loss": float(loss)}},
                    step=counter
                )
                counter += 1
                if (step + 1) % (valid_every * grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()  # self.valid presumably switches to eval mode
                    best_test_acc = max(best_test_acc, test_acc)
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        # BUGFIX: "best_valid_acc" previously recorded best_valid_test_acc.
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                    "best_valid_test_acc": best_valid_test_acc,
                                                    "best_test_acc": best_test_acc}})
        fitlog.finish()