from __future__ import absolute_import

import importlib
import os
import os.path as osp
import random
import math
import sys
import time
from turtle import xcor

sys.path.append('./')

import numpy as np
import torch
import torch.distributed
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader, SubsetRandomSampler, DistributedSampler, RandomSampler
from torch.nn.parallel import DistributedDataParallel, DataParallel

from config import get_args

# aster
from lib.trainers import Trainer
from lib.evaluators import Evaluator
from lib.datasets.dataset import AlignCollate
# from lib.models.model_builder import ModelBuilder

# from lib.models.model_builder_one_point import ModelBuilder
# from lib.models.model_builder_one_point_parallel import ModelBuilder
# from lib.models.model_builder_one_point_step import ModelBuilder

# transformer
# from lib.trainers import TrainerTrans as Trainer
# from lib.evaluators import EvaluatorTrans as Evaluator
# from lib.datasets.dataset import AlignCollateTrans as AlignCollate
# from lib.models.model_builder_trans import ModelBuilder

from lib.datasets.dataset import LmdbDataset
from lib.datasets.concatdataset import ConcatDataset
from lib.utils.distributed_ops import get_rank, init_distribute_mode, SequentialDistributedSampler
from lib.utils.logging import Logger, TBLogger  # TFLogger
from lib.utils.serialization import load_checkpoint


def get_data(distributed, data_dir, voc_type, max_len, num_samples, load_all, height, width, batch_size, workers, is_train, keep_ratio):
    """Build an LMDB dataset (or a concat of several) and its DataLoader.

    Args:
        distributed: "DDP" for distributed training, anything else for single-process.
        data_dir: one LMDB directory, or a list of them (concatenated).
        voc_type / max_len / num_samples / load_all: forwarded to LmdbDataset.
        height / width: target image size for AlignCollate.
        batch_size / workers: DataLoader settings.
        is_train: selects the sampler and whether the last partial batch is dropped.
        keep_ratio: forwarded to AlignCollate.

    Returns:
        (dataset, data_loader) tuple.
    """
    if isinstance(data_dir, list):
        dataset = ConcatDataset([
            LmdbDataset(d, voc_type, max_len, num_samples, load_all=load_all)
            for d in data_dir
        ])
    else:
        dataset = LmdbDataset(data_dir, voc_type, max_len, num_samples, load_all=load_all)
    print('total image: ', len(dataset))

    if distributed == "DDP":
        if is_train:
            sample = DistributedSampler(dataset)
        else:
            # For evaluation use the custom sequential sampler so each rank
            # sees a contiguous, non-overlapping slice of the test set.
            sample = SequentialDistributedSampler(dataset, batch_size)
    else:
        sample = RandomSampler(dataset)

    # Single construction site: only drop the last partial batch during
    # training; evaluation must cover every sample exactly once.
    data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=workers, sampler=sample,
                             pin_memory=True, drop_last=is_train,
                             collate_fn=AlignCollate(imgH=height, imgW=width, keep_ratio=keep_ratio))

    return dataset, data_loader


def get_dataset(data_dir, voc_type, max_len, num_samples):
    """Load a single LmdbDataset, or concatenate several when *data_dir* is a list."""
    if isinstance(data_dir, list):
        parts = [LmdbDataset(d, voc_type, max_len, num_samples) for d in data_dir]
        dataset = ConcatDataset(parts)
    else:
        dataset = LmdbDataset(data_dir, voc_type, max_len, num_samples)
    print('total image: ', len(dataset))
    return dataset


def get_dataloader(synthetic_dataset, real_dataset, height, width, batch_size, workers,
                   is_train, keep_ratio):
    """Build a loader over a synthetic+real mix of fixed total size.

    A random slice of the shuffled synthetic indices is replaced by all real
    indices (offset past the synthetic range inside the ConcatDataset), so the
    epoch length equals ``len(synthetic_dataset)``.
    Assumes ``len(real_dataset) <= len(synthetic_dataset)``.
    """
    n_syn = len(synthetic_dataset)
    n_real = len(real_dataset)

    # Keep all but the first n_real shuffled synthetic indices ...
    syn_indices = list(np.random.permutation(n_syn))[n_real:]
    # ... and fill the gap with every real index, shifted past the synthetic block.
    real_indices = list(np.random.permutation(n_real) + n_syn)
    mixed_indices = syn_indices + real_indices
    assert len(mixed_indices) == n_syn

    sampler = SubsetRandomSampler(mixed_indices)
    mixed_dataset = ConcatDataset([synthetic_dataset, real_dataset])
    print('total image: ', len(mixed_dataset))

    loader = DataLoader(mixed_dataset, batch_size=batch_size, num_workers=workers,
                        shuffle=False, pin_memory=True, drop_last=True, sampler=sampler,
                        collate_fn=AlignCollate(imgH=height, imgW=width, keep_ratio=keep_ratio))
    return mixed_dataset, loader


def init_seeds(seed=0, cuda_deterministic=False):
    """Seed the Python, NumPy and PyTorch RNGs and configure cuDNN.

    Speed vs. reproducibility tradeoff:
    https://pytorch.org/docs/stable/notes/randomness.html
    ``cuda_deterministic=True`` forces deterministic (slower) cuDNN kernels;
    ``False`` enables benchmark mode (faster, less reproducible).
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    cudnn.deterministic = bool(cuda_deterministic)
    cudnn.benchmark = not cuda_deterministic


def get_model(model_name, *args, **kwargs):
    """Instantiate a class from its dotted path, e.g. ``pkg.module.ClassName``.

    Everything before the final dot is imported as a module; the last
    component is looked up on it and called with the remaining arguments.
    """
    module_path, _, cls_name = model_name.rpartition('.')
    model_cls = getattr(importlib.import_module(module_path), cls_name)
    return model_cls(*args, **kwargs)


def main(args):
    """Train or evaluate a scene-text recognition model, driven by CLI ``args``.

    Sets up optional DDP, per-rank seeding, file/TensorBoard loggers, the
    data loaders and the model, then either runs a one-shot evaluation over
    all test sets (``args.evaluate``) or the full training loop followed by
    a final test with the best checkpoint.
    """
    if args.distributed == "DDP":
        init_distribute_mode(args)
    rank = get_rank()
    # Distinct seed per rank so random state differs across DDP workers.
    init_seeds(1 + rank)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    # (loggers are only created when training; the evaluate path returns
    # before they are used, so the conditional definition is safe).
    if not args.evaluate:
        # make symlink
        # make_symlink_if_not_exists(osp.join(args.real_logs_dir, args.logs_dir), osp.dirname(osp.normpath(args.logs_dir)))
        os.makedirs(args.logs_dir, exist_ok=True)
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
        train_tfLogger = TBLogger(osp.join(args.logs_dir, 'train'))
        eval_tfLogger = TBLogger(osp.join(args.logs_dir, 'eval'))

    # Save the args to disk
    if not args.evaluate:
        cfg_save_path = osp.join(args.logs_dir, 'cfg.txt')
        cfgs = vars(args)
        with open(cfg_save_path, 'w') as f:
            for k, v in cfgs.items():
                f.write('{}: {}\n'.format(k, v))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (32, 100)

    test_dataset, test_loader = \
        get_data(args.distributed, args.test_data_dir, args.voc_type, args.max_len, args.num_test, args.load_all,
                 args.height, args.width, args.batch_size, args.workers, False, args.keep_ratio)
    if not args.evaluate:
        train_dataset, train_loader = \
            get_data(args.distributed, args.synthetic_train_data_dir, args.voc_type, args.max_len, args.num_train, args.load_all,
                     args.height, args.width, args.batch_size, args.workers, True, args.keep_ratio)

    # Share one max label length so train/test label tensors have equal size.
    if args.evaluate:
        max_len = test_dataset.max_len
    else:
        max_len = max(train_dataset.max_len, test_dataset.max_len)
        train_dataset.max_len = test_dataset.max_len = max_len
    # Create model
    model = get_model(args.model, arch=args.arch, rec_num_classes=test_dataset.rec_num_classes,
                      sDim=args.decoder_sdim, attDim=args.attDim, max_len_labels=max_len,
                      eos=test_dataset.char2id[test_dataset.EOS], STN_ON=args.STN_ON, print_model_size=args.evaluate)
    model = model.to(device)

    # Load from checkpoint
    # Initialize best-so-far to the worst possible value for the chosen metric.
    if args.evaluation_metric == 'accuracy':
        best_res = 0
    elif args.evaluation_metric == 'editdistance':
        best_res = math.inf
    else:
        raise ValueError("Unsupported evaluation metric:", args.evaluation_metric)
    start_epoch = 0
    start_iters = 0

    if args.resume:
        checkpoint = load_checkpoint(args.resume, device)
        model.load_state_dict(checkpoint['state_dict'])
        model = model.to(device)

        # compatibility with the epoch-wise evaluation version
        if 'epoch' in checkpoint.keys():
            start_epoch = checkpoint['epoch']
        else:
            start_iters = checkpoint['iters']
            start_epoch = int(start_iters // len(train_loader)) if not args.evaluate else 0
        best_res = checkpoint['best_res']
        print("=> Start iters {}  best res {:.1%}"
              .format(start_iters, best_res))

    # Wrap for multi-GPU AFTER loading weights so state_dict keys still match.
    if args.distributed == "DDP":
        model = DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)  # , find_unused_parameters=True)
    elif args.distributed == "DP":
        model = DataParallel(model)
    # Evaluator
    evaluator = Evaluator(model, args.evaluation_metric, device, print_model_size=args.evaluate)  # using model.module here would run at single-GPU speed

    if args.evaluate:
        # Evaluate each test set separately (one loader per directory).
        names, test_datasets, test_loaders = [], [], []
        if isinstance(args.test_data_dir, list):
            for test_data_dir in args.test_data_dir:
                test_dataset, test_loader = get_data(args.distributed, test_data_dir, args.voc_type, args.max_len, args.num_test, args.load_all, args.height, args.width, args.batch_size, args.workers,
                                                     False,
                                                     args.keep_ratio)
                names.append(test_data_dir)
                test_datasets.append(test_dataset)
                test_loaders.append(test_loader)
        else:
            test_dataset, test_loader = get_data(args.distributed, args.test_data_dir, args.voc_type, args.max_len, args.num_test, args.load_all, args.height, args.width, args.batch_size,
                                                 args.workers, False,
                                                 args.keep_ratio)
            names.append(args.test_data_dir)
            test_datasets.append(test_dataset)
            test_loaders.append(test_loader)
        start = time.time()
        for name, td, tl in zip(names, test_datasets, test_loaders):
            name = os.path.basename(name)
            print(name)
            if args.vis:
                # Visualizations go next to the resumed checkpoint.
                test_vis_dir = os.path.join(os.path.dirname(args.resume), "eval/vis", name)
                os.makedirs(test_vis_dir, exist_ok=True)
            else:
                test_vis_dir = None
            evaluator.evaluate(tl, dataset=td, print_freq=10000, vis_dir=test_vis_dir)
        print('it took {0} s.'.format(time.time() - start))
        return

    # Optimizer
    # Only optimize trainable parameters (frozen ones are filtered out).
    param_groups = model.parameters()
    param_groups = filter(lambda p: p.requires_grad, param_groups)
    optimizer = optim.Adadelta(param_groups, lr=args.lr, weight_decay=args.weight_decay)
    # optimizer = optim.Adam(param_groups, lr=0.001)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=0.1)

    # Trainer
    loss_weights = {}
    loss_weights['loss_rec'] = 1.
    loss_weights['loss_dist'] = 0.5  # one_point_v4.12.10: 0.5; one_point_v4.12.10: 5
    if args.debug:
        args.print_freq = 1
    trainer = Trainer(model, args.evaluation_metric, args.logs_dir,
                      iters=start_iters, best_res=best_res, grad_clip=args.grad_clip,
                      device=device, loss_weights=loss_weights, print_model_size=args.evaluate)

    # Start training
    # evaluator.evaluate(test_loader, step=0, tfLogger=eval_tfLogger, dataset=test_dataset)  # TODO: to be removed
    for epoch in range(start_epoch, args.epochs):
        if args.vis:
            train_vis_dir = os.path.join(args.logs_dir, f"train/vis/epoch_{epoch}")
            os.makedirs(train_vis_dir, exist_ok=True)
        else:
            train_vis_dir = None
        current_lr = optimizer.param_groups[0]['lr']
        print(f"A new epoch: {epoch}, learning rate: {current_lr}")
        if isinstance(model, DistributedDataParallel):
            # Set the sampler's epoch; DistributedSampler needs it so all
            # processes shuffle with the same per-epoch random seed.
            train_loader.sampler.set_epoch(epoch)
        trainer.train(epoch, train_loader, optimizer, current_lr,
                      print_freq=args.print_freq,
                      train_tfLogger=train_tfLogger,
                      is_debug=args.debug,
                      evaluator=evaluator,
                      test_loader=test_loader,
                      eval_tfLogger=eval_tfLogger,
                      test_dataset=test_dataset,
                      test_freq=args.test_freq,
                      vis_dir=train_vis_dir)
        # NOTE(review): passing `epoch` to step() is deprecated in recent
        # PyTorch; a bare scheduler.step() per epoch is the modern form.
        scheduler.step(epoch)

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'), device)
    if isinstance(model, DistributedDataParallel) or isinstance(model, DataParallel):
        model.module.load_state_dict(checkpoint['state_dict'])
    else:
        model.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(test_loader, dataset=test_dataset)

    # Close the tensorboard logger
    train_tfLogger.close()
    eval_tfLogger.close()


if __name__ == '__main__':
    # Entry point: parse the CLI configuration and launch training/evaluation.
    main(get_args(sys.argv[1:]))
