from __future__ import division
from __future__ import print_function

import argparse
import datetime
import os
import os.path as osp
import sys
import time
import warnings

# Pin the visible GPUs before `torch` is imported (CUDA_VISIBLE_DEVICES is read
# at CUDA initialization time, so it must be set prior to the torch import below).
# NOTE(review): this hard-coded "2,3" shadows the --gpu-devices CLI flag defined
# at the bottom of the file, which is parsed but never applied — confirm intent.
os.environ['CUDA_VISIBLE_DEVICES'] = "2,3"
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader

import data_manager
from engine import trainer, tester
from models.PSTA import PSTA
# from onnx import save_onnx
from tools.generaltools import set_random_seed, get_cfg
from tools.loggers import Logger
from tools.torchtools import count_num_param
from utils import transforms as T
from utils.losses import CrossEntropyLabelSmooth, TripletLoss
from utils.lr_schedulers import WarmupMultiStepLR
from utils.samplers import RandomIdentitySampler
from utils.utils import mkdir_if_missing, make_optimizer, save_checkpoint
from utils.video_loader import VideoDataset

# Make the current working directory importable.
# NOTE(review): this runs AFTER the project imports above succeeded, so it only
# affects imports performed later (e.g. inside data_manager) — verify it is needed.
sys.path.append(os.getcwd())


def main():
    """Train the PSTA video re-identification model end to end.

    Reads the YAML config named by the module-level ``args``, then builds the
    dataset, model, data loaders, losses, optimizer and LR scheduler, and runs
    the train / periodic-validation loop, checkpointing the best rank-1 model.

    Side effects: creates log/checkpoint/ONNX directories, redirects
    ``sys.stdout`` into a file-backed ``Logger``, and writes TensorBoard
    event files.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config)
    cfg.MODE = "train"
    cfg.params.RESUME = False
    # Developer mode (internal use at CSU): redirect all outputs into a
    # timestamped subdirectory.  Enabled via --developer_mode True.
    if args.developer_mode:
        cls = "video"
        time_now = time.strftime("%Y%m%d-%H:%M", time.localtime())
        cfg.params.LOG_PATH = os.path.join(cfg.params.LOG_PATH, cfg.MODE, cls, time_now)
        cfg.params.CHECKPOINT_SAVE_PATH = os.path.join(cfg.params.LOG_PATH, "checkpoints")
        cfg.params.ONNX_SAVE_PATH = os.path.join(cfg.params.LOG_PATH, "ONNX")

    cfg.freeze()
    set_random_seed(cfg.RANDOM_SEED)
    use_gpu = torch.cuda.is_available()
    log_name = 'log_train.txt'
    # Checkpoint output directory
    checkpoint_save_dir = cfg.params.CHECKPOINT_SAVE_PATH
    mkdir_if_missing(checkpoint_save_dir)
    # ONNX model output directory (the export itself is disabled below)
    onnx_save_dir = cfg.params.ONNX_SAVE_PATH
    mkdir_if_missing(onnx_save_dir)
    # Plain-text log file; stdout is tee'd into it via Logger
    logdir = osp.join(cfg.params.LOG_PATH, log_name)
    mkdir_if_missing(osp.dirname(logdir))
    # TensorBoard event directory.  `writer` is kept alive for its side
    # effect (event file creation); it is not written to in this function.
    tbdir = osp.join(cfg.params.LOG_PATH)
    mkdir_if_missing(tbdir)
    writer = SummaryWriter(tbdir)
    sys.stdout = Logger(logdir)

    # Echo the parsed CLI arguments, one per line.
    s = str(args).split(", ")
    print("==========args==========")
    for item in s:
        print(item)
    print("=========================")

    if use_gpu:
        torch.cuda.empty_cache()
        print(torch.cuda.device_count())
        print("Using GPU {}!".format(args.gpu_devices))
        cudnn.benchmark = True
    else:
        warnings.warn('Currently using CPU, however, GPU is highly recommended')

    print('\n初始化DataManager')
    dataset = data_manager.init_dataset(mode=cfg.MODE,
                                        cfg=cfg)
    print('\n模型初始化')
    model = PSTA(num_classes=dataset.num_train_pids,
                 pretrain_choice=cfg.MODEL.PRETRAIN_CHOICE,
                 model_name=cfg.MODEL.NAME,
                 seq_len=cfg.MODEL.SEQ_LEN)
    print('Model size: {:.3f} M'.format(count_num_param(model)))
    model = nn.DataParallel(model).cuda() if use_gpu else model

    # Augmentation / preprocessing pipelines.
    # NOTE(review): the train pipeline uses lowercase helpers (T.resize, ...)
    # while the test pipeline uses CamelCase (T.Resize, ...) — presumably both
    # exist in utils.transforms; verify.
    transform_train = T.Compose([
        T.resize(cfg.INPUT.SIZE_TRAIN),
        T.random_horizontal_flip(),
        T.to_tensor(),
        T.normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.random_erasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
    ])

    transform_test = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TEST),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    pin_memory = use_gpu
    # P x K sampling: each batch holds NUM_INSTANCE tracklets per identity,
    # as required by the triplet loss.
    video_sampler = RandomIdentitySampler(dataset.train,
                                          num_instances=cfg.DATALOADER.NUM_INSTANCE)

    trainloader = DataLoader(
        dataset=VideoDataset(dataset=dataset.train,
                             seq_len=cfg.MODEL.SEQ_LEN,
                             sample=cfg.params.TRAIN_SAMPLER,
                             transform=transform_train),
        sampler=video_sampler,
        batch_size=cfg.SOLVER.SEQS_PER_BATCH,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=pin_memory,
        drop_last=True
    )
    if cfg.params.TEST_SAMPLER == 'dense':
        # Dense sampling evaluates every clip of a tracklet, so batch_size
        # must be 1 (variable number of clips per item).
        print('Build dense sampler')
        queryloader = DataLoader(
            dataset=VideoDataset(dataset=dataset.query,
                                 seq_len=cfg.MODEL.SEQ_LEN,
                                 sample=cfg.params.TEST_SAMPLER,
                                 transform=transform_test,
                                 max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
            batch_size=1,
            shuffle=False,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            pin_memory=pin_memory,
            drop_last=False
        )

        galleryloader = DataLoader(
            dataset=VideoDataset(dataset=dataset.gallery,
                                 seq_len=cfg.MODEL.SEQ_LEN,
                                 sample=cfg.params.TEST_SAMPLER,
                                 transform=transform_test,
                                 max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
            batch_size=1,
            shuffle=False,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            pin_memory=pin_memory,
            drop_last=False,
        )
    else:
        queryloader = DataLoader(
            dataset=VideoDataset(dataset=dataset.query,
                                 seq_len=cfg.MODEL.SEQ_LEN,
                                 sample=cfg.params.TEST_SAMPLER,
                                 transform=transform_test,
                                 max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
            batch_size=cfg.params.SEQS_PER_BATCH,
            shuffle=False,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            pin_memory=pin_memory,
            drop_last=False
        )

        galleryloader = DataLoader(
            dataset=VideoDataset(dataset=dataset.gallery,
                                 seq_len=cfg.MODEL.SEQ_LEN,
                                 sample=cfg.params.TEST_SAMPLER,
                                 transform=transform_test,
                                 max_seq_len=cfg.params.TEST_MAX_SEQ_NUM),
            batch_size=cfg.params.SEQS_PER_BATCH,
            shuffle=False,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            pin_memory=pin_memory,
            drop_last=False
        )
    # Loss functions: label-smoothed cross-entropy (ID loss) + triplet loss.
    xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids)

    tent = TripletLoss(cfg.SOLVER.MARGIN, distance=cfg.params.TRIPLET_DISTANCE)

    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer,
                                  cfg.SOLVER.STEPS,
                                  cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_ITERS,
                                  cfg.SOLVER.WARMUP_METHOD)

    # Resume-from-checkpoint support is temporarily disabled:
    # if cfg.params.RESUME:
    #     if check_isfile(cfg.params.RESUME_CHECKPOINT_PATH):
    #         cfg.defrost()
    #         cfg.params.START_EPOCH = resume_from_checkpoint(cfg.params.RESUME_CHECKPOINT_PATH, model,
    #                                                             optimizer=FEM_optimizer, is_eval=False)
    #         cfg.freeze()

    time_start = time.time()
    print('=> 开始训练')
    best_rank = 0
    for epoch in range(cfg.params.START_EPOCH, cfg.params.MAX_EPOCHS):
        print("==> Epoch {}/{}".format(epoch + 1, cfg.params.MAX_EPOCHS))
        # NOTE(review): get_lr() is deprecated in newer PyTorch in favor of
        # get_last_lr() — confirm the pinned torch version before changing.
        print("current lr:", scheduler.get_lr()[0])
        trainer(model, trainloader, xent, tent, optimizer, use_gpu)
        scheduler.step()
        torch.cuda.empty_cache()
        # Validate every VAL_EPOCHS epochs once past VAL_START_EPOCHS, and
        # always on the final epoch (parentheses make the precedence explicit;
        # behavior is unchanged).
        if ((epoch + 1) >= cfg.params.VAL_START_EPOCHS and (epoch + 1) % cfg.params.VAL_EPOCHS == 0) or (
                epoch + 1) == cfg.params.MAX_EPOCHS:
            print('==> 模型验证')
            _, metrics = tester(model, queryloader, galleryloader, use_gpu, test_distance=cfg.params.TEST_DISTANCE)
            rank1 = metrics[0]
            if best_rank < rank1:
                best_rank = rank1
                is_best = True
            else:
                is_best = False

            # Checkpoint every validated epoch; save_checkpoint also keeps a
            # copy of the best model when is_best is True.
            save_checkpoint({
                'state_dict': model.state_dict(),
                'rank1': rank1,
                'epoch': epoch,
                'optimizer': optimizer.state_dict(),
            }, is_best=is_best, fpath=checkpoint_save_dir, ck_name=f"epoch_{epoch + 1}.pth")

    # NOTE: exporting best_model.pth to ONNX is disabled pending a bug fix.
    # state_dict = torch.load(os.path.join(checkpoint_save_dir, "best_model.pth"))
    # new_state_dict = OrderedDict()
    # for k, v in state_dict['state_dict'].items():
    #     name = k[7:]
    #     if "classifier" in k:
    #         continue
    #     new_state_dict[name] = v
    # model.load_state_dict(new_state_dict, strict=False)
    # onnx_name = os.path.join(onnx_save_dir, "best_model.onnx")
    # save_onnx(model, onnx_name)

    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))


if __name__ == '__main__':
    def _str2bool(value):
        """Parse a CLI boolean.

        argparse's ``type=bool`` treats ANY non-empty string as True (so
        ``--developer_mode False`` would enable developer mode); parse the
        common true-spellings explicitly instead.  ``--developer_mode True``
        keeps working as documented.
        """
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser(description="vesselreid Training")
    parser.add_argument('--config', type=str, default="config_chuanbo_vreid_train.yml",
                        help='the file of train/test/infer')
    parser.add_argument('--config_file_path', type=str, default="",
                        help='the config file of train/test/infer from the AI plate!')
    parser.add_argument('--project_dir', type=str, default="",
                        help='project\'s root direction')
    parser.add_argument('--gpu-devices', default='2,3', type=str,
                        help='gpu device ids for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--developer_mode', default=False, type=_str2bool,
                        help='true：中南用的开发者模式 false：提交给远望时的模式')
    args = parser.parse_args()
    # Resolve the config path relative to the project root (parent of this
    # file's directory).
    args.project_dir = os.path.dirname(os.path.dirname(__file__))
    args.config = os.path.join(args.project_dir, args.config)
    # args.config = args.config_file_path  # comment this line out when debugging locally
    main()
