from seq.preproc import prepro
from seq.seq import Sequence
from seq.basic import get_score
from seq.dataset import get_loader
from seq.config import config
import torch
import torch.nn.functional as F
import numpy as np
from tensorboardX import SummaryWriter
from absl import app
import os
from tqdm import tqdm
import json


# Cache of inverted word2id maps keyed by file path, so `save` (called once
# per batch) does not re-read and re-parse the JSON vocabulary every time.
_id2word_cache = {}


def _get_id2word(path):
    """Load the word2id JSON at `path` and return the inverted id->word map (cached)."""
    if path not in _id2word_cache:
        with open(path, 'r') as f:
            word2id = json.load(f)
        _id2word_cache[path] = {v: k for k, v in word2id.items()}
    return _id2word_cache[path]


def save(context_ids, context_label, aspect_pos, polarity, y, fout, config):
    """Dump one batch of predictions to `fout` in human-readable form.

    For each example, writes three lines: the detokenized sentence, the gold
    polarity label, and the per-class scores from `y`.

    `context_label` and `aspect_pos` are accepted (and zipped) but not
    currently printed — kept for interface compatibility with callers.
    """
    def parse_sent(ids, dic):
        # Any id missing from the vocabulary falls back to the token for
        # id 0 (presumably the pad/unk token — TODO confirm).
        return ' '.join(dic[i] if i in dic else dic[0] for i in ids)

    polarity_list = ['negative', 'neutral', 'positive']
    data_dir = os.path.join(config.model, config.dataset)
    id2word = _get_id2word(os.path.join(data_dir, config.word2id_file))
    for cid, cl, ap, p, yy in zip(context_ids, context_label, aspect_pos, polarity, y):
        print(parse_sent(cid, id2word), file=fout)
        print(polarity_list[p], file=fout)
        # `s` (score), not `p`, to avoid shadowing the polarity index above.
        print(['{}:{}'.format(t, s) for t, s in zip(polarity_list, yy)], file=fout)


def _to_device(batch_data, device):
    """Move one (context_ids, context_label, aspect_mask, aspect_pos, polarity) batch to `device`."""
    return tuple(t.to(device) for t in batch_data)


def _eval_metrics(model, data, device):
    """Run `model` over `data` without gradients and score the predictions.

    Returns the (accuracy, precision, recall, f1) tuple produced by get_score.
    """
    logit_list = []
    rating_list = []
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for batch_data in tqdm(data):
            context_ids, context_label, aspect_mask, aspect_pos, polarity = _to_device(batch_data, device)
            _, y = model(context_ids, context_label, aspect_mask, aspect_pos)
            logit_list.append(y.cpu().data.numpy())
            rating_list.append(polarity.cpu().data.numpy())
    return get_score(np.concatenate(logit_list, 0), np.concatenate(rating_list, 0))


def train(config):
    """Train the Sequence model, logging metrics to TensorBoard.

    Every epoch: trains on the train split, evaluates on both splits,
    dumps per-example predictions to the result dir, and checkpoints the
    model (plus a 'best.pth' tracking the best test accuracy).
    """
    device = torch.device(config.device)
    # random seed — seed the CPU RNG too, not only CUDA, for reproducibility
    torch.manual_seed(config.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(config.seed)
    # load data
    data_dir = os.path.join(config.model, config.dataset)
    train_data = get_loader(os.path.join(data_dir, config.train_data), config.batch)
    test_data = get_loader(os.path.join(data_dir, config.test_data), config.batch)
    wordemb = np.loadtxt(os.path.join(data_dir, config.wordmat_file))
    # init model
    model = Sequence(dim_word=config.dim_word, dim_hidden=config.dim_hidden, num_class=config.num_class,
                     wordmat=wordemb, device=device).to(device)
    # summary writer and output directories
    writer = SummaryWriter('logs/%s/%s/%s' % (config.dataset, config.model, config.timestr))
    model_save_dir = os.path.join(config.model_save, config.dataset, config.model)
    os.makedirs(model_save_dir, exist_ok=True)
    result_dir = os.path.join(config.result_save, config.dataset, config.model, 'train')
    os.makedirs(result_dir, exist_ok=True)
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optim = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
    # weight mixing the model's auxiliary loss (loss1) with the
    # polarity cross-entropy (loss2)
    lambdaa = 0.9
    best_acc = 0.0
    for epoch in tqdm(range(config.max_epoch)):
        # train one epoch; per-example predictions are dumped for inspection
        model.train()
        with open(os.path.join(result_dir, '{}.txt'.format(epoch)), 'w') as save_fout:
            for i, batch_data in tqdm(enumerate(train_data)):
                model.zero_grad()
                context_ids, context_label, aspect_mask, aspect_pos, polarity = _to_device(batch_data, device)
                loss1, y = model(context_ids, context_label, aspect_mask, aspect_pos)
                save(context_ids.tolist(), context_label.tolist(), aspect_pos.tolist(), polarity.tolist(),
                     y.tolist(), save_fout, config)
                loss2 = F.cross_entropy(y, polarity)
                step = len(train_data) * epoch + i
                writer.add_scalar('loss1', loss1, step)
                writer.add_scalar('loss2', loss2, step)
                loss = loss1 * lambdaa + loss2 * (1.0 - lambdaa)
                loss.backward()
                optim.step()
        # eval on both splits
        model.eval()
        train_acc, train_precision, train_recall, train_f1 = _eval_metrics(model, train_data, device)
        writer.add_scalar('train_acc', train_acc, epoch)
        writer.add_scalar('train_precision', train_precision, epoch)
        writer.add_scalar('train_recall', train_recall, epoch)
        writer.add_scalar('train_f1', train_f1, epoch)
        test_acc, test_precision, test_recall, test_f1 = _eval_metrics(model, test_data, device)
        writer.add_scalar('test_acc', test_acc, epoch)
        writer.add_scalar('test_precision', test_precision, epoch)
        writer.add_scalar('test_recall', test_recall, epoch)
        writer.add_scalar('test_f1', test_f1, epoch)
        print('epoch %2d : '
              ' train_acc=%.4f, train_precision=%.4f, train_recall=%.4f,train_f1=%.4f,'
              ' test_acc=%.4f, test_precision=%.4f, test_recall=%.4f, test_f1=%.4f' %
              (epoch, train_acc, train_precision, train_recall, train_f1,
               test_acc, test_precision, test_recall, test_f1))
        # show parameters
        for name, param in model.named_parameters():
            writer.add_histogram(name, param, epoch, bins='doane')
        # save model (every epoch, plus the running best on test accuracy)
        torch.save(model.state_dict(), os.path.join(model_save_dir, '{}.pth'.format(epoch)))
        if test_acc > best_acc:
            torch.save(model.state_dict(), os.path.join(model_save_dir, 'best.pth'))
            best_acc = test_acc
    writer.close()


def test(config):
    """Evaluate the best saved checkpoint on the test split and print metrics."""
    device = torch.device(config.device)
    # load data
    data_dir = os.path.join(config.model, config.dataset)
    test_data = get_loader(os.path.join(data_dir, config.test_data), config.batch)
    wordemb = np.loadtxt(os.path.join(data_dir, config.wordmat_file))
    # init model
    model = Sequence(dim_word=config.dim_word, dim_hidden=config.dim_hidden, num_class=config.num_class,
                     wordmat=wordemb, device=device).to(device)
    # load best checkpoint; map_location lets a GPU-trained checkpoint load
    # on a CPU-only machine (or a different GPU)
    model_save_dir = os.path.join(config.model_save, config.dataset, config.model)
    model.load_state_dict(torch.load(os.path.join(model_save_dir, 'best.pth'), map_location=device))
    model = model.to(device)
    model.eval()
    result_dir = os.path.join(config.result_save, config.dataset, config.model, 'test')
    os.makedirs(result_dir, exist_ok=True)
    logit_list = []
    rating_list = []
    # close the result file deterministically; no gradients needed at inference
    with open(os.path.join(result_dir, 'best.txt'), 'w') as save_fout, torch.no_grad():
        for batch_data in tqdm(test_data):
            context_ids, context_label, aspect_mask, aspect_pos, polarity = \
                tuple(t.to(device) for t in batch_data)
            loss1, y = model(context_ids, context_label, aspect_mask, aspect_pos)
            save(context_ids.tolist(), context_label.tolist(), aspect_pos.tolist(), polarity.tolist(),
                 y.tolist(), save_fout, config)
            logit_list.append(y.cpu().data.numpy())
            rating_list.append(polarity.cpu().data.numpy())
    test_acc, test_precision, test_recall, test_f1 = get_score(np.concatenate(logit_list, 0),
                                                               np.concatenate(rating_list, 0))
    print('test_acc=%.4f, test_precision=%.4f, test_recall=%.4f, test_f1=%.4f' %
          (test_acc, test_precision, test_recall, test_f1))


def main(_):
    """absl entry point: dispatch on config.mode ('prepro' | 'train' | 'test')."""
    if config.mode == 'prepro':
        prepro(config)
    elif config.mode == 'train':
        train(config)
    elif config.mode == 'test':
        test(config)
    else:
        # ValueError (still an Exception subclass for existing catchers),
        # and include the offending value so the failure is diagnosable.
        raise ValueError('unknown mode: {}'.format(config.mode))


if __name__ == '__main__':
    # Delegate to absl's app runner, which parses command-line flags
    # before invoking main.
    app.run(main)
