import torch
from torch import nn
import torch.nn.functional as F

from torch.optim import Adam
from torch.utils.data import DataLoader

from sklearn import metrics

from util import build_vocab, evaluate, CharTokenizer, PAD
from dataset import LabeledDanmuDataset
from modeling import TextRNNClf

# --- Module-level setup ------------------------------------------------------
# NOTE(review): everything below runs at import time (datasets are read,
# the model is constructed). Fine for a training script; would need a
# main-guard if this module were ever imported elsewhere.

# Build the character vocabulary from the raw training text (project helper;
# presumably maps char -> id and includes the PAD symbol -- see util.py).
vocab = build_vocab('../data/train.txt')

tokenizer = CharTokenizer(vocab)

# Labeled datasets; sequences are cut/padded to a fixed length of 16 tokens.
train = LabeledDanmuDataset('../data/train', tokenizer, seq_len=16)
dev = LabeledDanmuDataset('../data/dev', tokenizer, seq_len=16)

# RNN text classifier; padding_idx lets the embedding layer ignore PAD tokens.
model = TextRNNClf(vocab_size=tokenizer.vocab_size,
                   padding_idx=vocab[PAD])


# Adam optimizer over all model parameters, lr = 1e-3.
adam = Adam(lr=1e-3, params=model.parameters())

# Shuffle only the training split; keep dev order stable for reproducible eval.
train_loader = DataLoader(train, batch_size=64, shuffle=True)
dev_loader = DataLoader(dev, batch_size=64, shuffle=False)


def main(num_epochs=50, log_interval=200):
    """Train the classifier, periodically evaluating on the dev set.

    Every ``log_interval`` batches the current train/dev loss and accuracy
    are printed, and the model state dict is checkpointed whenever the dev
    loss improves on the best value seen so far.

    Args:
        num_epochs: number of passes over the training data (default 50,
            matching the previously hard-coded value).
        log_interval: evaluate and log every this many global steps
            (default 200, matching the previously hard-coded value).

    Uses the module-level ``model``, ``adam``, ``train_loader`` and
    ``dev_loader``; writes checkpoint files to the working directory.
    """
    model.train()
    total_batch = 0               # global step counter across all epochs
    dev_best_loss = float('inf')  # best dev loss observed so far
    for epoch in range(num_epochs):
        print('Epoch [{}/{}]'.format(epoch + 1, num_epochs))

        for trains, labels in train_loader:
            model.zero_grad()  # clear gradients accumulated by the last step
            outputs = model(trains)
            # NOTE(review): nll_loss expects log-probabilities; assumes
            # TextRNNClf ends with log_softmax -- confirm in modeling.py.
            loss = F.nll_loss(outputs, labels)
            loss.backward()
            adam.step()

            if total_batch % log_interval == 0:
                true = labels.data.cpu()
                # Predicted class = argmax over the class dimension.
                predic = torch.max(outputs.data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(true, predic)
                dev_acc, dev_loss = evaluate(model, dev_loader)
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    # Checkpoint on improvement. NOTE(review): the file name
                    # is just the bare step number (e.g. "200"); consider a
                    # prefix/extension like 'ckpt_200.pt'.
                    torch.save(model.state_dict(), '{}'.format(total_batch))
                    improve = '*'  # marks an improved-dev-loss iteration
                else:
                    improve = ''

                msg = 'Iter: {0:>6},  Train Loss: {1:>5.2},  Train Acc: {2:>6.2%},  Val Loss: {3:>5.2},  Val Acc: {4:>6.2%} {5}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, improve))
                model.train()  # evaluate() is presumed to switch to eval mode
            total_batch += 1


# Run training only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
