


import os

import numpy
import torch
from tqdm import tqdm

from utils.transformer_utils import *
from transformer_dataset import PseudoDateDataset
from transformer import Transformer


def train(model: torch.nn.Module, loss_function: torch.nn.Module, optimizer: torch.optim.Optimizer,
          dataloader: torch.utils.data.DataLoader, epoch: int, gpu_available: bool = False):
    """
    Run one training epoch of the Transformer demo and save a checkpoint.

    :param model         : Transformer model.
    :param loss_function : loss function (e.g. ``torch.nn.MSELoss``).
    :param optimizer     : optimizer (e.g. ``torch.optim.Adam``).
    :param dataloader    : loader over a ``torch.utils.data.Dataset`` subclass.
    :param epoch         : current epoch index (shown in the progress bar and
                           passed to ``save_checkpoint``).
    :param gpu_available : move each batch to the GPU when True.
    """

    def _calc_accuracy(y_pred: numpy.ndarray, y_true: numpy.ndarray) -> float:
        """Token-level accuracy between two one-hot (batch, seq, vocab) arrays."""
        # argmax over the vocabulary axis recovers the class index per token.
        pred_ids = numpy.argmax(y_pred, axis=2)
        true_ids = numpy.argmax(y_true, axis=2)
        # NOTE: the original `astype(numpy.int)` breaks on NumPy >= 1.24
        # (the alias was removed); the mean of the bool mask is equivalent.
        return float((pred_ids == true_ids).mean())

    pbar = tqdm(total=len(dataloader), bar_format='{l_bar}{r_bar}', dynamic_ncols=True)
    pbar.set_description('Epoch {}'.format(epoch))
    try:
        for batch_x, batch_y, _ in dataloader:
            if gpu_available:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            # Teacher forcing: decoder input is the target shifted right;
            # the model predicts the target shifted left (batch_y[:, 1:, :]).
            y_pred = model(batch_x, batch_y[:, :-1, :])
            accuracy = _calc_accuracy(
                y_pred=y_pred.detach().cpu().numpy(),
                y_true=batch_y[:, 1:, :].detach().cpu().numpy(),
            )
            loss = loss_function(y_pred, batch_y[:, 1:, :])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            pbar.set_postfix(loss=loss.detach().cpu().item(), accuracy=accuracy)
            pbar.update()
    finally:
        # Close the bar even if a batch raises, so the terminal is not left broken.
        pbar.close()
    save_checkpoint('checkpoint', epoch, model, optimizer)


if __name__ == '__main__':
    gpu_id = '0'  # CUDA device id as a string; set to None to train on CPU
    batch_size = 4
    n_epoch = 5

    # Synthetic date-translation dataset; the padding tensor is the one-hot
    # vector of the pad token, used by the collate function to pad batches.
    dataset = PseudoDateDataset(size=1000)
    padding_tensor = torch.FloatTensor(numpy.zeros(len(dataset.human_vocabulary)))
    padding_tensor[dataset.human_vocabulary.get(dataset.pad_token)] = 1
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn(padding_tensor),
    )

    # Transformer hyper-parameters (input/output widths match the vocabularies
    # of the dataset above — TODO confirm against PseudoDateDataset).
    params = dict(
        d_input_encoder=37,
        d_input_decoder=12,
        d_output_encoder=64,
        d_output_decoder=64,
        d_output=12,
        d_hidden_encoder=128,
        d_hidden_decoder=128,
        n_head_encoder=4,
        n_head_decoder=4,
        n_position_encoder=50,
        n_position_decoder=50,
    )
    model = Transformer(**params)
    if gpu_id is not None:
        # Restrict CUDA to the requested device before moving the model.
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
        model = model.cuda()
    loss_function = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters())

    # Resume from the latest checkpoint when one exists; otherwise start fresh.
    model = load_model('checkpoint', None, model)
    optimizer = load_optimizer('checkpoint', None, optimizer)
    try:
        training_epoch = find_last_checkpoint_epoch('checkpoint')
        print('train from epoch {}'.format(training_epoch + 1))
    except Exception as e:
        print('train from the very beginning, {}'.format(e))
        training_epoch = -1

    for epoch in range(training_epoch + 1, n_epoch):
        train(
            model=model,
            loss_function=loss_function,
            optimizer=optimizer,
            dataloader=dataloader,
            epoch=epoch,
            gpu_available=gpu_id is not None,
        )
