# -*- coding: utf-8 -*-

import torch
import argparse
from torch.utils.data import DataLoader

from data_processor import DataProcessor
from data_processor import TranslateDataset
from data_processor import DataSpliter
from translator import Translator
from engine import Engine


def config():
    """Parse command-line options for the translation training script.

    Returns:
        argparse.Namespace with attributes: batch_size, en2cn,
        test_ratio, lr, epoch, and device.
    """
    parser = argparse.ArgumentParser()
    # (flag, kwargs) table keeps the option list easy to scan and extend.
    options = [
        ("--batch_size", dict(type=int, default=16)),
        ("--en2cn", dict(action="store_true")),
        ("--test_ratio", dict(type=float, default=0.1)),
        ("--lr", dict(type=float, default=1e-4)),
        ("--epoch", dict(type=int, default=10)),
        ("--device", dict(type=int, default=-1)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()


def main():
    """Entry point: build the model and dataloaders, then train.

    Reads hyperparameters from the command line (see config()), constructs
    the Translator model, loads/splits the parallel corpus, and runs
    Engine.train() for the requested number of epochs.
    """
    args = config()

    # A non-negative --device selects that CUDA card; anything else (the
    # default -1) runs on CPU. NOTE(review): assumes the CUDA device exists
    # when requested — torch.device itself does not validate availability.
    if args.device >= 0:
        device = torch.device("cuda:{}".format(args.device))
    else:
        device = torch.device("cpu")

    # ** Model **
    # Vocabulary and sequence-length caps shared by the model and datasets.
    max_vocab_size = 20000
    max_len = 200
    src_vocab_size, tgt_vocab_size = max_vocab_size, max_vocab_size
    d_model, n_head, n_layer, src_n_position, tgt_n_position = 64, 8, 6, max_len, max_len
    translator = Translator(src_vocab_size, tgt_vocab_size, d_model, n_head,
                            n_layer, src_n_position, tgt_n_position, dropout=0.1,
                            max_gen_len=max_len)
    translator = translator.to(device)
    print(translator)

    # ** Data **
    # The --en2cn flag selects translation direction by swapping which
    # corpus file is treated as source vs. target.
    if args.en2cn:
        src_filename = "data/en.txt"
        tgt_filename = "data/zh.txt"
    else:
        src_filename = "data/zh.txt"
        tgt_filename = "data/en.txt"

    dp = DataProcessor(src_filename, tgt_filename, max_vocab_size, max_vocab_size)
    src_token_list, src_vocab, tgt_token_list, tgt_vocab = dp.get_all_corpus_and_vocab()
    train_src_list, train_tgt_list, test_src_list, test_tgt_list = \
        DataSpliter(src_token_list, tgt_token_list, args.test_ratio).load_train_test_data_list()

    train_dataset = TranslateDataset(train_src_list, src_vocab, max_len,
                                     train_tgt_list, tgt_vocab, max_len)
    test_dataset = TranslateDataset(test_src_list, src_vocab, max_len,
                                    test_tgt_list, tgt_vocab, max_len)
    # Shuffle only the training split; keep evaluation order deterministic.
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)

    # ** Engine for training and test**
    engine = Engine(translator, train_dataloader, test_dataloader,
                    lr=args.lr, max_len=max_len, device=device)

    print("ENGINE TRAIN")
    # Report progress per epoch; Engine.train() itself takes no epoch index.
    for epoch_idx in range(args.epoch):
        print("epoch {}/{}".format(epoch_idx + 1, args.epoch))
        engine.train()


# Entry-point guard: run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
