import os
import json
import argparse
from trainer import SupervisedTrainer, IRGANTrainer
import utils.data_loader as data_utils


def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional list of argument strings. When None (the default),
            argparse falls back to ``sys.argv[1:]``, so existing zero-arg
            callers are unaffected.

    Returns:
        argparse.Namespace with ``config``, ``expt_dir`` and ``ckpt_path``.
    """
    parser = argparse.ArgumentParser('Deep Text Match')

    # data
    parser.add_argument("--config", type=str,
                        default="./configs/stc.gen_cnn.dis_lstm.irgan.50k.json",
                        help="config path")
    parser.add_argument("--expt_dir", type=str,
                        default="./outputs/stc_gan_gen_cnn_dis_lstm_50k",
                        # fixed: help text was copy-pasted as "config path"
                        help="experiment output directory")
    parser.add_argument('--ckpt_path', type=str,
                        default="./data/stc-gan/pretrained/",
                        help='last checkpoint path')
    return parser.parse_args(argv)


def train(args):
    """Load the experiment config, build trainer and data iterators, and train.

    Dispatches on ``config['trainer']``: ``"irgan_trainer"`` selects the
    adversarial IRGAN setup (GAN train data, pool-sized eval batches); any
    other value selects the supervised setup (pairwise train data).

    Args:
        args: Parsed CLI namespace with ``config`` (JSON config path),
            ``expt_dir`` (experiment output dir) and ``ckpt_path``
            (checkpoint directory).
    """
    # Use a context manager so the config file handle is closed promptly
    # (the original `json.load(open(...))` leaked the handle).
    with open(args.config, 'r', encoding="utf-8") as config_file:
        config = json.load(config_file)

    data_dir = config['data_dir']
    # Keyword args shared by every loader call below.
    common = dict(vocab_path=os.path.join(data_dir, config['vocab_file']),
                  sent1_maxlen=config['sent1_maxlen'],
                  sent2_maxlen=config['sent2_maxlen'])

    if "trainer" in config and config['trainer'].lower() == "irgan_trainer":
        trainer = IRGANTrainer(config, args.expt_dir, args.ckpt_path)
        train_iter = data_utils.load_gan_data(
            data_path=os.path.join(data_dir, config['train_file']),
            batch_size=config['batch_size'],
            pool_size=config['pool_size'],
            shuffle=True,
            **common)
        # IRGAN evaluates pointwise over the whole candidate pool, so the
        # eval batch covers batch_size * pool_size examples per step.
        eval_batch_size = config['batch_size'] * config['pool_size']
    else:
        trainer = SupervisedTrainer(config, args.expt_dir, args.ckpt_path)
        train_iter = data_utils.load_pairwise_data(
            data_path=os.path.join(data_dir, config['train_file']),
            batch_size=config['batch_size'],
            shuffle=True,
            **common)
        eval_batch_size = config['batch_size']

    # Validation and test are always pointwise and never shuffled; only the
    # batch size differs between the two trainer setups.
    valid_iter = data_utils.load_pointwise_data(
        data_path=os.path.join(data_dir, config['valid_file']),
        batch_size=eval_batch_size,
        shuffle=False,
        **common)
    test_iter = data_utils.load_pointwise_data(
        data_path=os.path.join(data_dir, config['test_file']),
        batch_size=eval_batch_size,
        shuffle=False,
        **common)

    trainer.train(train_iter, valid_iter, test_iter)


if __name__ == '__main__':
    # Script entry point: parse CLI args and launch training.
    train(parse_args())
