import argparse

from torch.utils.data import DataLoader

from model.bert import BERT
from trainer.pretrain import BERTTrainer
from dataset.dataset import BERTDataset
from dataset.vocab import WordVocab
import os
import time
import logging
import datetime
import random
import numpy as np
import torch

def get_timestamp():
    """Return the current UTC+8 wall-clock time as a 'yymmdd-HHMMSS' string.

    Used to build unique log filenames. Uses a timezone-aware datetime
    instead of the deprecated ``datetime.utcnow()`` + timedelta offset;
    the formatted output is identical.
    """
    tz_utc8 = datetime.timezone(datetime.timedelta(hours=8))
    return datetime.datetime.now(tz_utc8).strftime('%y%m%d-%H%M%S')

def setup_logger(logger_name, save_dir, phase, level = logging.INFO, screen = False, to_file = False):
    """Configure the named logger with optional file and/or console output.

    Args:
        logger_name: name passed to ``logging.getLogger``.
        save_dir: directory for the log file (used only when ``to_file``).
        phase: filename prefix, e.g. 'train' -> 'train_<timestamp>.log'.
        level: logging level applied to the logger.
        screen: when True, attach a ``StreamHandler`` (console output).
        to_file: when True, attach a ``FileHandler`` writing into save_dir.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    fmt = logging.Formatter(
        '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
        datefmt='%y-%m-%d %H:%M:%S')
    handlers = []
    if to_file:
        filename = phase + '_{}.log'.format(get_timestamp())
        handlers.append(logging.FileHandler(os.path.join(save_dir, filename), mode='w'))
    if screen:
        handlers.append(logging.StreamHandler())
    for handler in handlers:
        handler.setFormatter(fmt)
        logger.addHandler(handler)

log_path = './log'

def train():
    parser = argparse.ArgumentParser()

    #parser.add_argument("-c", "--train_dataset", required=True, type=str, help="train dataset for train bert")
    #parser.add_argument("-t", "--test_dataset", type=str, default=None, help="test set for evaluate train set")
    #parser.add_argument("-v", "--vocab_path", required=True, type=str, help="built vocab model path with bert-vocab")
    #parser.add_argument("-o", "--output_path", required=True, type=str, help="ex)output/bert.model")

    parser.add_argument("-hs", "--hidden", type=int, default=256, help="hidden size of transformer model")
    parser.add_argument("-l", "--layers", type=int, default=8, help="number of layers")
    parser.add_argument("-a", "--attn_heads", type=int, default=8, help="number of attention heads")
    parser.add_argument("-s", "--seq_len", type=int, default=20, help="maximum sequence len")

    parser.add_argument("-b", "--batch_size", type=int, default=64, help="number of batch_size")
    parser.add_argument("-e", "--epochs", type=int, default=10, help="number of epochs")
    parser.add_argument("-w", "--num_workers", type=int, default=5, help="dataloader worker size")

    parser.add_argument("--with_cuda", type=bool, default=True, help="training with CUDA: true, or false")
    parser.add_argument("--log_freq", type=int, default=10, help="printing loss every n iter: setting n")
    parser.add_argument("--corpus_lines", type=int, default=None, help="total number of lines in corpus")
    parser.add_argument("--cuda_devices", type=int, nargs='+', default=None, help="CUDA device ids")
    parser.add_argument("--on_memory", type=bool, default=True, help="Loading on memory: true or false")

    parser.add_argument("--lr", type=float, default=1e-3, help="learning rate of adam")
    parser.add_argument("--adam_weight_decay", type=float, default=0.01, help="weight_decay of adam")
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="adam first beta value")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="adam first beta value")

    args = parser.parse_args()

    seed = 1234
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True


    if not os.path.exists(log_path):
        os.makedirs(log_path)

    setup_logger('base', log_path, 'train', level = logging.INFO, screen = True, to_file = True)
    logger = logging.getLogger(name = 'base')

    #print("Loading Vocab", args.vocab_path)
    #vocab = WordVocab.load_vocab(args.vocab_path)
    #print("Vocab Size: ", len(vocab))

    #print("Loading Train Dataset", args.train_dataset)
    #train_dataset = BERTDataset(args.train_dataset, vocab, seq_len=args.seq_len,
    #                            corpus_lines=args.corpus_lines, on_memory=args.on_memory)

    #print("Loading Test Dataset", args.test_dataset)
    #test_dataset = BERTDataset(args.test_dataset, vocab, seq_len=args.seq_len, on_memory=args.on_memory) \
    #    if args.test_dataset is not None else None

    #print("Creating Dataloader")
    #train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers)
    #test_data_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_workers) \
    #    if test_dataset is not None else None

    #print("Building BERT model")

    #print("Creating BERT Trainer")
    #trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_data_loader, test_dataloader=test_data_loader,
    #                      lr=args.lr, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay,
    #                      with_cuda=args.with_cuda, cuda_devices=args.cuda_devices, log_freq=args.log_freq)

    #print("Training Start")
    #for epoch in range(args.epochs):
    #    trainer.train(epoch)
    #    trainer.save(epoch, args.output_path)

    #    if test_data_loader is not None:
    #        trainer.test(epoch)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = BERT(16, hidden=args.hidden, n_layers=args.layers, attn_heads=args.attn_heads).to(device)
    model.eval()
    logger.info(device)
    input = torch.ones(64, 20).to(device).long()
    seg_input = torch.ones(64, 20).to(device).long()
    logger.info('input_size:{}  seg_size:{}'.format(input.shape, seg_input.shape))

    for _ in range(5):
        start = time.time()
        output = model(input, seg_input)
        torch.cuda.synchronize()
        warm_up = time.time() - start
        logger.info("Warm up time:{:.4f}".format(warm_up))

    with torch.autograd.profiler.profile(enabled = True, use_cuda = True, record_shapes = False, profile_memory = False) as prof:
        outputs = model(input, seg_input)

    logger.info("\n"+prof.table())
# Script entry point: parse CLI hyper-parameters and run the profiling routine.
if __name__ == "__main__":
    train()
