"""
    implement transformer by pytorch
    http://nlp.seas.harvard.edu/2018/04/03/attention.html
"""
import time
import os
import argparse
from transformer import *
from data import *
from utils import *
from opt import SimpleLossCompute, NoamOpt

# Model hyper-parameters: the Transformer "base" configuration.
num_hidden_layers = 6       # encoder/decoder stack depth
hidden_size = 512           # embedding / model dimension
hidden_fc_size = 2048       # position-wise feed-forward inner dimension
num_attention_heads = 8     # attention heads per layer
dropout = 0.1               # dropout rate used throughout the model
BATCH_SIZE = 32             # interpreted by batch_size_fn (see MyIterator below)

# Short aliases matching the notation of "Attention Is All You Need".
N = num_hidden_layers
d_model = hidden_size
d_ff = hidden_fc_size
h = num_attention_heads
# dropout=0.1

parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train')  # 'train' trains then evaluates; anything else evaluates a saved model
parser.add_argument('--gpu', type=int, default=0)         # GPU index; a negative value hides all GPUs (CPU run)
config = parser.parse_args()

# Restrict which GPU is visible; an empty string hides every device so
# torch.cuda.is_available() returns False.
# NOTE(review): this only works if CUDA has not been initialised yet at this
# point — verify no imported module touches CUDA at import time.
os.environ['CUDA_VISIBLE_DEVICES'] = str(config.gpu) if config.gpu >= 0 else ''
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'


def run_epoch(data_iter, model, loss_compute):
    """Run one full pass over ``data_iter`` and return the loss per token.

    Args:
        data_iter: iterable of batch objects exposing ``src``, ``trg``,
            ``src_mask``, ``trg_mask``, ``trg_y`` and ``ntokens`` attributes.
        model: callable taking ``(src, trg, src_mask, trg_mask)`` and
            returning the decoder output.
        loss_compute: callable ``(out, trg_y, ntokens) -> loss``; when
            training it also performs the backward pass / optimizer step.

    Returns:
        Total accumulated loss divided by the total number of target tokens.
    """
    start = time.time()
    total_tokens = 0
    total_loss = 0
    tokens = 0  # tokens since the last log line, for throughput reporting
    for i, batch in enumerate(data_iter):
        # Call the module itself rather than model.forward() so that
        # nn.Module.__call__ runs and any registered hooks fire.
        out = model(batch.src, batch.trg,
                    batch.src_mask, batch.trg_mask)
        loss = loss_compute(out, batch.trg_y, batch.ntokens)
        total_loss += loss
        total_tokens += batch.ntokens
        tokens += batch.ntokens
        if i % 50 == 1:  # log every 50 steps, first log at step 1
            elapsed = time.time() - start
            print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
                  (i, loss / batch.ntokens, tokens / elapsed))
            start = time.time()
            tokens = 0
    return total_loss / total_tokens


def _print_sentence(label, symbols):
    """Print `label`, a tab, then each symbol (space-separated) until "</s>"."""
    print(label, end="\t")
    for sym in symbols:
        if sym == "</s>":
            break
        print(sym, end=" ")
    print()


def evaluate(model=None):
    """Greedy-decode the validation set, printing model output beside the reference.

    When `model` is None, a checkpoint is loaded from disk instead.
    Relies on the module-level `valid_iter`, `SRC` and `TGT` built in __main__.
    """
    if model is None:
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from a trusted source.
        model = torch.load("logs/model/iwslt.pt", map_location='cpu')

    for batch in valid_iter:
        # Take the first sentence of the batch (batch dim first after transpose).
        src = batch.src.transpose(0, 1)[:1]
        src_mask = (src != SRC.vocab.stoi["<blank>"]).unsqueeze(-2)
        out = greedy_decode(model, src, src_mask,
                            max_len=60, start_symbol=TGT.vocab.stoi["<s>"])
        _print_sentence(
            "Translation:",
            (TGT.vocab.itos[out[0, i]] for i in range(1, out.size(1))))
        _print_sentence(
            "Target:",
            (TGT.vocab.itos[batch.trg.data[i, 0]] for i in range(1, batch.trg.size(0))))


if __name__ == '__main__':
    train, valid, test, SRC, TGT = dataloader()
    pad_idx = TGT.vocab.stoi["<blank>"]  # padding token index in the target vocab

    # Batches are bucketed by (src, trg) length so padding is minimised.
    # NOTE(review): device=0 here is the legacy torchtext convention —
    # confirm against the installed torchtext version.
    train_iter = MyIterator(train, batch_size=BATCH_SIZE, device=0,
                            repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                            batch_size_fn=batch_size_fn, train=True)
    valid_iter = MyIterator(valid, batch_size=BATCH_SIZE, device=0,
                            repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                            batch_size_fn=batch_size_fn, train=False)

    if config.mode == 'train':
        model = make_model(len(SRC.vocab), len(TGT.vocab), N, d_model, d_ff, h, dropout)
        # Use the device selected at module level rather than an unconditional
        # .cuda() call, so the script also runs on CPU-only machines.
        model.to(device)

        # Noam learning-rate schedule: warm up for 4000 steps, then decay.
        model_opt = NoamOpt(model.src_embed[0].d_model, 1, 4000,
                            torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))

        criterion = LabelSmoothing(size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1)
        criterion.to(device)

        # train
        for epoch in range(10):
            model.train()
            run_epoch((rebatch(pad_idx, b) for b in train_iter),
                      model,
                      SimpleLossCompute(model.generator, criterion, model_opt))

            model.eval()
            # Pass opt=None during evaluation: supplying the optimizer would
            # make SimpleLossCompute backprop and step, i.e. train on the
            # validation set.
            loss = run_epoch((rebatch(pad_idx, b) for b in valid_iter),
                             model,
                             SimpleLossCompute(model.generator, criterion, None))
            print('epoch: %d, eval loss: %.4f' % (epoch, loss))

        evaluate(model)
    else:
        evaluate()
