# coding: utf-8
import argparse
import time
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import pickle
import os
import model
import data_utils
import logging
import utils
from classify import acc_score

def parse_args():
    """Parse command-line arguments and derive the run's output paths.

    After parsing, the model directory is specialized via utils.get_name(args)
    and created if missing; paths for the dictionary, model checkpoint, log and
    CSV training record are attached to the returned namespace, along with the
    question file used by the final answering test (classify.acc_score).

    Returns:
        argparse.Namespace with the raw options plus model_dir, dict_path,
        model_path, log_path, csv_info_path and question.
    """
    parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')
    # The corpus directory must contain train.txt, valid.txt and test.txt.
    parser.add_argument('--tvt_corpus', type=str, default='../data/wikitext-2', help='location of the data corpus')
    parser.add_argument('--model_dir', type=str, default='models/wiki2/',
                        help='output directory; dictionary, model and logs are written here')
    parser.add_argument('--fine_tune_from', type=str, default='',
                        help='directory of a previous run; its dict.pkl and model.pt are reused')
    parser.add_argument('--lr', type=float, default=20,
                        help='initial learning rate')
    parser.add_argument('--vocab_size', type=int, default=25000)
    parser.add_argument('--model', type=str, default='LSTM',
                        help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
    parser.add_argument('--emsize', type=int, default=200,
                        help='size of word embeddings')
    parser.add_argument('--nhid', type=int, default=200,
                        help='number of hidden units per layer')
    parser.add_argument('--nlayers', type=int, default=2,
                        help='number of layers')
    parser.add_argument('--clip', type=float, default=0.25,
                        help='gradient clipping')
    parser.add_argument('--epochs', type=int, default=1000,
                        help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=20, metavar='N',
                        help='batch size')
    parser.add_argument('--bptt', type=int, default=35,
                        help='sequence length')
    parser.add_argument('--dropout', type=float, default=0.2,
                        help='dropout applied to layers (0 = no dropout)')
    parser.add_argument('--tied', action='store_true',
                        help='tie the word embedding and softmax weights')
    parser.add_argument('--seed', type=int, default=1111,
                        help='random seed')
    parser.add_argument('--cuda', action='store_true',
                        help='use CUDA')
    parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                        help='report interval')
    args = parser.parse_args()

    args.model_dir = os.path.join(args.model_dir, utils.get_name(args))
    args.dict_path = os.path.join(args.model_dir, 'dict.pkl')
    args.model_path = os.path.join(args.model_dir, 'model.pt')
    args.log_path = os.path.join(args.model_dir, 'train.log')
    args.csv_info_path = os.path.join(args.model_dir, 'train.csv')

    # Idiom fix: was `if os.path.exists(...) is False`.
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)

    # Question set for the final answering test on the test split.
    args.question = os.path.join(args.tvt_corpus, 'test.json')

    return args


def config_log(args):
    """Route INFO-level logging both to args.log_path (truncated) and stderr."""
    logging.basicConfig(
        level=logging.INFO,
        format='[%(levelname)s] %(message)s',
        datefmt='%m-%d %H:%M',
        filename=args.log_path,
        filemode='w',
    )
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
    logging.getLogger('').addHandler(stream_handler)


if __name__ == '__main__':
    args = parse_args()
    config_log(args)

    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        if not args.cuda:
            logging.warning("WARNING: You have a CUDA device, so you should probably run with --cuda")
        else:
            torch.cuda.manual_seed(args.seed)

    ###############################################################################
    # Load data
    ###############################################################################

    # When fine-tuning, reuse the dictionary saved by the previous run so that
    # word ids stay consistent with the loaded model's embedding table.
    if args.fine_tune_from != "":
        dict_path_old = os.path.join(args.fine_tune_from, 'dict.pkl')
        logging.info('fine-tune: use previous dictionary: {}'.format(dict_path_old))
        # NOTE(review): pickle.load executes arbitrary code on load; only point
        # --fine_tune_from at trusted model directories.
        with open(dict_path_old, 'rb') as f:
            dictionary = pickle.load(f)
    else:
        dictionary = None

    # Build the corpus (dictionary=None means a fresh vocabulary is built,
    # capped at args.vocab_size), then persist the dictionary next to the model
    # so later fine-tune / answering runs can reload it.
    corpus = data_utils.CorpusTVT(args.tvt_corpus, max_vocab_size=args.vocab_size, dictionary=dictionary)
    logging.info('save dictionary to: {}'.format(args.dict_path))
    corpus.dictionary.save(args.dict_path)

    # batchify() lays the 1-D token stream out in columns.  With the alphabet as
    # the sequence and batch size 4 we would get
    # ┌ a g m s ┐
    # │ b h n t │
    # │ c i o u │
    # │ d j p v │
    # │ e k q w │
    # └ f l r x ┘.
    # Each column is treated as an independent stream by the model, so e.g. the
    # dependence of 'g' on 'f' cannot be learned, in exchange for efficient
    # batched processing.

    def batchify(data, bsz):
        """Reshape the 1-D token tensor `data` into (n_rows, bsz) columns."""
        # How many full rows of bsz tokens fit into the stream.
        rows = data.size(0) // bsz
        # Drop the trailing remainder that does not fill a whole row.
        trimmed = data.narrow(0, 0, rows * bsz)
        # (bsz, rows) then transpose to (rows, bsz): one column per stream.
        batched = trimmed.view(bsz, -1).t().contiguous()
        # Closes over `args` from the enclosing script for device placement.
        return batched.cuda() if args.cuda else batched

    # Evaluation uses a smaller, fixed batch size (evaluate() depends on this).
    eval_batch_size = 10
    logging.info('loading train valid test dataset...')
    train_data = batchify(corpus.train, args.batch_size)
    val_data = batchify(corpus.valid, eval_batch_size)
    test_data = batchify(corpus.test, eval_batch_size)
    logging.info('load train valid test dataset ok!')

    ###############################################################################
    # Build the model
    ###############################################################################

    # Either resume a full checkpoint (fine-tuning) or build a fresh RNNModel.
    if args.fine_tune_from != '':
        model_path = os.path.join(args.fine_tune_from, 'model.pt')
        logging.warning('fine tune from: {}'.format(model_path))
        # NOTE(review): torch.load unpickles arbitrary objects; only load
        # checkpoints from trusted directories.
        with open(model_path, 'rb') as f:
            model = torch.load(f)
        logging.warning('load ok!')
    else:
        ntokens = len(corpus.dictionary)
        # NOTE(review): this rebinds the name `model` from the imported module
        # to the network instance, shadowing the module; `model.RNNModel` must
        # not be referenced after this line.
        model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)

    if args.cuda:
        model.cuda()

    criterion = nn.CrossEntropyLoss()

    ###############################################################################
    # Training code
    ###############################################################################

    def repackage_hidden(h):
        """Wraps hidden states in new Variables, to detach them from their history.

        Recurses into tuples, since an LSTM's hidden state is a (h, c) pair.
        NOTE(review): relies on pre-0.4 PyTorch where `type(h) == Variable`
        holds for hidden tensors; on modern PyTorch this check fails and the
        tuple branch would be taken for plain tensors.
        """
        if type(h) == Variable:
            return Variable(h.data)
        else:
            return tuple(repackage_hidden(v) for v in h)

    # get_batch() slices the batchified source into chunks of length args.bptt.
    # If source equals the example output of batchify, a bptt-limit of 2 yields
    # these two Variables for i = 0:
    # ┌ a g m s ┐ ┌ b h n t ┐
    # └ b h n t ┘ └ c i o u ┘
    # Despite the name, the subdivision is NOT along the batch dimension (1) —
    # batchify already handled that — but along dimension 0, which corresponds
    # to the seq_len dimension of the LSTM.

    def get_batch(source, i, evaluation=False):
        """Return an (inputs, targets) pair of length <= args.bptt starting at row i."""
        # Near the end of the stream the chunk may be shorter than args.bptt.
        remaining = len(source) - 1 - i
        seq_len = args.bptt if args.bptt < remaining else remaining
        # volatile=True skips autograd graph construction (pre-0.4 PyTorch API).
        inputs = Variable(source[i:i + seq_len], volatile=evaluation)
        # Targets are the same chunk shifted one step ahead, flattened for the loss.
        targets = Variable(source[i + 1:i + 1 + seq_len].view(-1))
        return inputs, targets

    def evaluate(data_source):
        """Return the average per-token cross-entropy loss over data_source.

        data_source must have been batchified with eval_batch_size columns,
        since the hidden state is initialized for that batch size.
        """
        # Turn on evaluation mode which disables dropout.
        model.eval()
        total_loss = 0
        ntokens = len(corpus.dictionary)
        hidden = model.init_hidden(eval_batch_size)
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i, evaluation=True)
            output, hidden = model(data, hidden)
            output_flat = output.view(-1, ntokens)
            # criterion is a mean over seq_len * batch tokens; weight by seq_len
            # so a short final chunk does not skew the overall average.
            total_loss += len(data) * criterion(output_flat, targets).data
            hidden = repackage_hidden(hidden)
        # total_loss is a 1-element tensor in pre-0.4 PyTorch; [0] extracts the float.
        return total_loss[0] / len(data_source)

    def train():
        """Run one full epoch over train_data, updating `model` in place.

        Uses plain SGD (a manual parameter update, no optimizer object) with
        gradient-norm clipping.  Reads the script-level `lr` and `epoch`
        variables, so it must only be called from the epoch loop below.
        """
        # Turn on training mode which enables dropout.
        model.train()
        total_loss = 0
        start_time = time.time()
        ntokens = len(corpus.dictionary)
        hidden = model.init_hidden(args.batch_size)
        for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
            data, targets = get_batch(train_data, i)
            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden = repackage_hidden(hidden)
            model.zero_grad()
            output, hidden = model(data, hidden)
            loss = criterion(output.view(-1, ntokens), targets)
            loss.backward()

            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
            # Manual SGD step: p <- p - lr * p.grad.
            for p in model.parameters():
                p.data.add_(-lr, p.grad.data)

            total_loss += loss.data

            if batch % args.log_interval == 0 and batch > 0:
                # total_loss is a 1-element tensor in pre-0.4 PyTorch; [0] extracts the float.
                cur_loss = total_loss[0] / args.log_interval
                elapsed = time.time() - start_time
                logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                             'loss {:5.2f} | ppl {:8.2f}'.format(
                                 epoch, batch, len(train_data) // args.bptt, lr,
                                 elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
                total_loss = 0
                start_time = time.time()

    # Loop over epochs.
    lr = args.lr
    best_val_loss = None
    best_epoch_num = 0

    # At any point you can hit Ctrl + C to break out of training early.
    # Bug fix: the CSV file used to be opened *inside* the try whose `finally`
    # closed it, so a failure in open() raised a NameError from the finally
    # block.  The `with` statement guarantees the handle exists before cleanup
    # and always closes it.
    with open(args.csv_info_path, 'w') as csv_info_f:
        csv_info_f.write('epoch,lr,val_loss,val_ppl\n')
        try:
            for epoch in range(1, args.epochs + 1):
                epoch_start_time = time.time()
                train()
                val_loss = evaluate(val_data)
                logging.info('-' * 89)
                logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                             'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                                        val_loss, math.exp(val_loss)))
                logging.info('-' * 89)
                csv_info_f.write('{},{},{},{}\n'.format(epoch, lr, val_loss, math.exp(val_loss)))
                # Save the model if the validation loss is the best we've seen so far.
                if not best_val_loss or val_loss < best_val_loss:
                    with open(args.model_path, 'wb') as f:
                        torch.save(model, f)
                        logging.info('save model to: {}'.format(args.model_path))
                    best_val_loss = val_loss
                    best_epoch_num = epoch
                else:
                    # Anneal the learning rate when validation loss did not improve.
                    lr /= 4.0
                    if epoch - best_epoch_num > 5:
                        # Early stopping: no improvement for 5 consecutive epochs.
                        logging.info('Val_loss stops decreasing. Exiting..')
                        break
        except KeyboardInterrupt:
            logging.info('-' * 89)
            logging.info('Exiting from training early')

    # Load the best saved model (the last checkpoint written in the loop above).
    with open(args.model_path, 'rb') as f:
        model = torch.load(f)

    # Run on test data.
    test_loss = evaluate(test_data)
    logging.info('=' * 89)
    logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
        test_loss, math.exp(test_loss)))
    logging.info('=' * 89)

    # Run the question-answering accuracy test (also based on the test set).
    # CUDA is turned off first — presumably acc_score only supports CPU
    # inference; TODO confirm against classify.acc_score.
    args.cuda = False
    acc_score(args)