import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import os
import sys
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

class RLRumorDetection(nn.Module):
    """Rumor-detection model: a sentence encoder feeds a propagation model
    whose per-sequence outputs are classified by ``self.rdm_cls``.

    NOTE(review): ``self.rdm_cls`` (classification head) and
    ``self.rdm_loss_fn`` (fed log-probabilities, so presumably an NLL-style
    loss) are referenced throughout but never assigned in this file —
    presumably attached by a subclass or external setup code; confirm before
    using this class standalone. ``self.actor`` / ``self.critic`` are stored
    but unused here (the RL stage appears to live elsewhere).
    """

    def __init__(self, sent2vec, propagation, actor, critic, batch_size=5, grad_accum_cnt=4):
        """
        Args:
            sent2vec: module mapping a flat list of sentences to a tensor of
                sentence vectors (one row per sentence).
            propagation: module mapping a list of per-sequence tensors to one
                output row per sequence.
            actor, critic: RL policy/value networks (moved to device, unused here).
            batch_size: micro-batch size for the training DataLoader.
            grad_accum_cnt: micro-batches accumulated per optimizer step
                (learning rates are scaled down by this factor to compensate).
        """
        super(RLRumorDetection, self).__init__()
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.sent2vec = sent2vec.to(self.device)
        self.prop_model = propagation.to(self.device)
        self.actor = actor.to(self.device)
        self.critic = critic.to(self.device)
        self.batch_size = batch_size
        self.grad_accum_cnt = grad_accum_cnt

    def seq2sents(self, seqs):
        """Flatten a batch of sequences into a single sentence list plus the
        per-sequence lengths needed to undo the flattening later.

        Returns:
            (sent_list, seq_len): flattened sentences and ``len`` of each
            input sequence, in order.
        """
        sent_list = [sent for seq in seqs for sent in seq]
        seq_len = [len(seq) for seq in seqs]
        return sent_list, seq_len

    def forward(self, seqs):
        """Encode all sentences, regroup them per sequence, run the
        propagation model, and return class probabilities (softmax, dim=1)."""
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        # Split the flat sentence-vector matrix back into one tensor per
        # sequence. A running offset replaces the original per-sequence
        # sum(seq_len[:idx]), which was accidentally O(n^2).
        seq_tensors = []
        offset = 0
        for s_len in seq_len:
            seq_tensors.append(sent_vecs[offset:offset + s_len])
            offset += s_len
        seq_outs = self.prop_model(seq_tensors)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        return preds

    def RDMLoss(self, seqs, labels):
        """Compute classification loss and accuracy for one batch.

        Args:
            seqs: batch of raw sequences (as accepted by ``forward``).
            labels: class-index tensor on ``self.device``.

        Returns:
            (loss, acc): differentiable loss tensor and float accuracy.
        """
        preds = self.forward(seqs)
        # preds are probabilities; .log() turns them into log-probs for the
        # NLL-style loss function.
        loss = self.rdm_loss_fn(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def train_iters(self, train_set, dev_set, test_set,
                    valid_every=100, max_epochs=10, lr_discount=1.0,
                    log_dir="./logs/", log_suffix="_RumorDetection", model_file=""):
        """Train with gradient accumulation, periodically validate, and save
        the checkpoint whenever validation accuracy improves.

        Args:
            train_set/dev_set/test_set: datasets exposing ``collate_raw_batch``.
            valid_every: validate every ``valid_every`` optimizer steps.
            max_epochs: number of passes over the training set.
            lr_discount: global multiplier applied to all learning rates.
            log_dir/log_suffix: TensorBoard output location.
            model_file: checkpoint path passed to ``save_model``.
        """
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        # Learning rates are divided by grad_accum_cnt because gradients of
        # grad_accum_cnt un-normalized micro-batch losses are summed before
        # each optimizer step.
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 5e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt}
        ]
        )
        writer = SummaryWriter(log_dir, filename_suffix=log_suffix)
        best_valid_acc, best_test_acc, best_valid_test_acc = 0.0, 0.0, 0.0
        counter = 0
        optim.zero_grad()
        self.train()
        sum_loss, sum_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                # NOTE(review): batch layout assumed (seqs, _, labels) per the
                # collate_fn — confirm against the dataset implementation.
                loss, acc = self.RDMLoss(batch[0], batch[2].to(self.device))
                loss.backward()
                torch.cuda.empty_cache()
                # Step once every grad_accum_cnt micro-batches. The original
                # used `step % grad_accum_cnt == 0`, which stepped at step 0
                # (after a single backward) and was misaligned with the
                # `(step + 1) % grad_accum_cnt` logging/validation boundaries.
                if (step + 1) % self.grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                # .item() detaches from autograd; accumulating the live loss
                # tensor (as the original did) retains every computation
                # graph until the running sum is reset.
                sum_loss += loss.item()
                sum_acc += acc
                if (step + 1) % self.grad_accum_cnt == 0:
                    print('%6d | %6d  [%3d | %3d], loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                        step, len(train_loader),
                        epoch, max_epochs,
                        sum_loss / self.grad_accum_cnt, sum_acc / self.grad_accum_cnt,
                        best_valid_acc
                    )
                          )
                    writer.add_scalar("train loss", sum_loss / self.grad_accum_cnt, counter)
                    writer.add_scalar("train acc", sum_acc / self.grad_accum_cnt, counter)
                    sum_loss, sum_acc = 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()  # valid() switched to eval mode
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    # Fixed: the original logged val_acc under "valid loss"
                    # and val_loss under "valid acc" (values were swapped).
                    writer.add_scalar("valid loss", val_loss, counter)
                    writer.add_scalar("valid acc", val_acc, counter)
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        writer.close()  # flush pending TensorBoard events

    def valid(self, data_loader, pretrained_file=None, all_metrics=False):
        """Evaluate on ``data_loader``; optionally load a checkpoint first.

        Leaves the model in eval mode — callers that keep training must call
        ``self.train()`` afterwards (``train_iters`` does).

        Returns:
            (acc, loss) or, with ``all_metrics``, (acc, loss, precision,
            recall, f1).
        """
        self.eval()
        if pretrained_file is not None and os.path.exists(pretrained_file):
            self.load_model(pretrained_file)
        labels = []
        preds = []
        with torch.no_grad():
            for batch in data_loader:
                pred = self.forward(batch[0])
                torch.cuda.empty_cache()
                preds.append(pred)
                labels.append(batch[2])
            pred_tensor = torch.cat(preds, dim=0)
            label_tensor = torch.cat(labels, dim=0)
            # NOTE(review): assumes labels come off the loader on CPU;
            # label_tensor.numpy() would raise for CUDA tensors.
            val_acc = accuracy_score(label_tensor.numpy(),
                                     pred_tensor.cpu().argmax(dim=1).numpy())
            val_loss = self.rdm_loss_fn(pred_tensor.to(self.device).log(), label_tensor.to(self.device))
        if all_metrics:
            # NOTE(review): sklearn's default average='binary' raises for
            # multiclass labels — confirm this task is binary.
            val_prec = precision_score(label_tensor.numpy(),
                                       pred_tensor.cpu().argmax(dim=1).numpy())
            val_recall = recall_score(label_tensor.numpy(),
                                       pred_tensor.cpu().argmax(dim=1).numpy())
            val_f1 = f1_score(label_tensor.numpy(),
                               pred_tensor.cpu().argmax(dim=1).numpy())
            return val_acc, val_loss, val_prec, val_recall, val_f1
        else:
            return val_acc, val_loss

    def save_model(self, model_file):
        """Write the three trainable sub-modules' state dicts to ``model_file``."""
        torch.save(
            {
                "sent2vec": self.sent2vec.state_dict(),
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore the state saved by ``save_model``; exit if the file is missing."""
        if os.path.exists(model_file):
            # map_location lets a checkpoint saved on GPU load on a CPU-only
            # machine (and vice versa).
            checkpoint = torch.load(model_file, map_location=self.device)
            # Fixed: the original never restored sent2vec, leaving the
            # encoder at its unsaved weights after a load.
            self.sent2vec.load_state_dict(checkpoint["sent2vec"])
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()