import argparse
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from reid_libs.datasets_manager import get_dataset
from reid_libs.utils.data_utils.person_dataloader import PersonImageDataset
from reid_libs.utils.data_utils.person_transform import personTrainTransform, personTestTransform
from reid_libs.models import create_model
import os
import os.path as osp
import datetime
import time
from reid_libs.utils.misc import AverageMeter, save_checkpoint, save_best_checkpoint, write_to_logfile
from reid_libs.losses.loss_set import TripletHardLoss, CrossEntropyLabelSmoothLoss
from reid_libs.utils.data_utils.person_sampler import RandomIdentitySampler

# Directory containing this script; run artifacts (checkpoints, logs) are
# created relative to it (see config_paths).
BASEDIR = osp.dirname(osp.abspath(__file__))


def set_seed(seed):
    """Seed every RNG source used by this script for reproducibility.

    Covers Python's ``random``, NumPy, and torch on CPU. Also seeds all
    visible CUDA devices so GPU runs are repeatable too (the original only
    seeded the CPU generator).

    Args:
        seed (int): seed value applied to all generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op when CUDA is unavailable, so this is safe on CPU-only machines.
    torch.cuda.manual_seed_all(seed)


def config_paths(args):
    """Create per-run checkpoint and log directories under ``args.save_dir``.

    A fresh timestamped subdirectory is created on every invocation, so runs
    never overwrite each other.

    Args:
        args: parsed CLI namespace; reads ``args.save_dir`` and
            ``args.eval_only``.

    Returns:
        tuple[str, str]: (checkpoint directory, full path of the log file).
    """
    runs_root = osp.join(BASEDIR, args.save_dir)
    # strftime produces the same "YYYY-MM-DD_HH_MM_SS_ffffff" stamp the old
    # str() + chained .replace() calls did, without the fragile replacements
    # (and it always emits microseconds, even when they are zero).
    runtime_stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S_%f')
    run_dir = osp.join(runs_root, runtime_stamp)
    ckpt_dir = osp.join(run_dir, 'checkpoints')
    log_dir = osp.join(run_dir, 'logs')
    for d in (ckpt_dir, log_dir):
        # exist_ok avoids the race between an exists() check and makedirs().
        os.makedirs(d, exist_ok=True)
    logfilename = 'testing_log.txt' if args.eval_only else 'training_log.txt'
    return ckpt_dir, osp.join(log_dir, logfilename)


def do_arg_parse():
    """Build the CLI parser and parse the command-line options.

    Returns:
        argparse.Namespace: the parsed training/evaluation configuration.
    """

    def _str2bool(v):
        """Parse 'true'/'false'-style strings into real booleans.

        ``type=bool`` is a well-known argparse pitfall: ``bool(s)`` is True
        for ANY non-empty string, so ``--labelsmooth False`` used to yield
        True. This converter parses common spellings explicitly.
        """
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'boolean value expected, got {v!r}')

    parser = argparse.ArgumentParser()

    # device
    parser.add_argument('-gpu', '--useGPU', type=_str2bool, default=True)

    # dataset
    parser.add_argument('-r', '--root', type=str, default=r'/data1/sdy/fast-reid/datasets',
                        help='Path of container folder of all datasets')
    parser.add_argument('-d', '--dataset', type=str, default='Market-1501-v15.09.15',
                        help='The name of the selected dataset')

    # model and loss
    parser.add_argument('-a', '--arch', type=str, default='PersonResNet')
    parser.add_argument('--height', type=int,
                        default=256, help='input_image_height')
    parser.add_argument('--width', type=int,
                        default=128, help='input_image_width')
    parser.add_argument('--num_feat', type=int, default=2048,
                        help='length of feature vector')
    parser.add_argument('--alpha', type=float, default=0.5,
                        help='alpha is the hyper-weight of metric loss, '
                        'on the other hand, the weight of '
                        'classification loss would be (1-alpha)')

    # optimizer
    parser.add_argument('--optim', type=str, default='adam',
                        help='selected optimizer')
    parser.add_argument('-lr', '--LR', type=float,
                        default=0.00035, help='learning_rate')
    parser.add_argument('--gamma', type=float, default=0.1,
                        help='LR decay factor used by StepLR')
    parser.add_argument('--upstep', type=int, default=40,
                        help='StepLR step size (epochs between LR decays)')
    parser.add_argument('--weight_decay', type=float,
                        default=5e-04, help="weight decay")
    parser.add_argument('--labelsmooth', type=_str2bool,
                        default=False, help='use label-smoothed cross entropy')

    # training-stage / dataloader / weight-loading
    parser.add_argument('--max_epoch', type=int, default=120)
    parser.add_argument('--resume', type=str, default=None,
                        metavar='PATH', help='path of model checkpoint file')
    parser.add_argument('-b', '--batch_size', type=int, default=64)
    parser.add_argument('-n', '--num_instances', type=int, default=4,
                        help='images sampled per identity within a batch')
    parser.add_argument('-j', '--num_workers', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0)

    # config of log and save
    parser.add_argument('--print_freq', type=int,
                        default=10, help='print frequency')
    parser.add_argument('--eval_interval', type=int, default=20,
                        help='eval after every N epochs')
    parser.add_argument('--save_dir', type=str, default='runs', metavar='PATH')

    # misc
    parser.add_argument('--eval-only', action='store_true',
                        help='evaluation only')
    parser.add_argument('--feat_metric_name', type=str, default='f_euc_rank1',
                        help='a chosen key in score_dict for best-saving')
    parser.add_argument('--ImageNetPretrained', type=_str2bool, default=True)

    return parser.parse_args()


def analyze_model_weights(model):
    """Debug helper: dump the classifier head's parameters to stdout.

    Expects a DataParallel-wrapped model (it reaches through
    ``model.module`` to find ``classifier``). Returns nothing.
    """
    classifier_state = model.module.classifier.state_dict()
    print(classifier_state)


def train(epoch, model, losses, optimizer, scheduler, train_loader, device, pathLogfile):
    """Run one training epoch over ``train_loader``.

    Only the classification loss is optimized at the moment; the metric
    (triplet) loss is logged as a constant 0 placeholder so the log format
    stays stable.

    Args:
        epoch: current epoch index (used for logging only).
        model: network whose forward returns ``(feat_, feat, logits)``.
        losses: dict holding 'criterion_class' and 'criterion_metric'.
        optimizer: torch optimizer stepping the model parameters.
        scheduler: LR scheduler stepped once at the start of the epoch.
        train_loader: yields ``(imgs, path_imgs, pids, camids)`` batches.
        device: torch.device batches are moved to.
        pathLogfile: text file per-iteration stats are appended to.
    """
    model.train()
    # NOTE(review): scheduler.step() runs at the START of the epoch, before
    # any optimizer.step(); modern PyTorch expects the reverse order and will
    # warn.  Kept as-is to preserve the original LR schedule.
    scheduler.step()
    print(f"LR up to: {optimizer.state_dict()['param_groups'][0]['lr']}")

    loss_value_cls = AverageMeter()
    loss_value_mtc = AverageMeter()
    loss_value = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    prec_value = AverageMeter()

    time_A = time.time()
    for b_idx, (imgs, path_imgs, pids, camids) in enumerate(train_loader):
        imgs, pids = imgs.to(device), pids.to(device)

        data_time.update(time.time() - time_A)

        feat_, feat, ys = model(imgs)
        loss1 = losses["criterion_class"](ys, pids)
        # Metric loss is currently disabled; to re-enable:
        #   loss2 = losses["criterion_metric"](feat_, pids)
        #   loss = (1 - alpha) * loss1 + alpha * loss2
        loss2 = torch.tensor(0.)  # placeholder so the meters/log keep their shape
        loss = loss1

        optimizer.zero_grad()
        loss.backward()
        # Fixed: removed the leftover per-iteration analyze_model_weights(model)
        # debug call -- it printed the full classifier state_dict every batch,
        # flooding stdout and slowing training considerably.
        optimizer.step()

        batch_time.update(time.time() - time_A)

        loss_value_cls.update(loss1.item(), pids.size(0))
        loss_value_mtc.update(loss2.item(), pids.size(0))
        loss_value.update(loss.item(), pids.size(0))

        # classification accuracy on this batch
        _, preds = torch.max(ys.data, 1)
        total = pids.size(0)
        correct = (preds == pids).squeeze().sum().cpu().numpy()
        acc = correct / total
        prec_value.update(acc)

        put_str = \
            f'Epoch {epoch}\t' \
            f'Iter {b_idx}\t' \
            f'Loss_cls {loss_value_cls.val:.4f} ({loss_value_cls.avg:.4f})\t' \
            f'Loss_mtc {loss_value_mtc.val:.4f} ({loss_value_mtc.avg:.4f})\t' \
            f'Prec {prec_value.val:.4f} ({prec_value.avg:.4f})\t'

        print(put_str)
        write_to_logfile(pathLogfile, put_str)

        time_A = time.time()


def test(model, dataset, query_loader, gallery_loader, device, pathLogfile,
         feat_metric_name=None):
    """Evaluate the model on the query/gallery split and return one score.

    Args:
        model: the (DataParallel-wrapped) network to evaluate.
        dataset: dataset object providing ``.query`` and ``.gallery`` lists.
        query_loader / gallery_loader: DataLoaders over query/gallery images.
        device: torch.device the evaluator should run on.
        pathLogfile: log file passed through to the evaluator.
        feat_metric_name: key selecting which entry of the evaluator's score
            dict to return.  When None (the default), falls back to the
            module-global ``args.feat_metric_name`` -- this preserves the
            original hidden-global behavior for existing callers.

    Returns:
        The score stored under ``feat_metric_name`` in the evaluator's dict.
    """
    from reid_libs.utils.testing_utils.img_evaluators import ImgEvaluator
    if feat_metric_name is None:
        # NOTE(review): `args` is the module-global created under __main__;
        # prefer passing feat_metric_name explicitly to avoid this coupling.
        feat_metric_name = args.feat_metric_name
    evaluator = ImgEvaluator(model, selected_device=device)
    feats_list = ['feat_']
    score_dict = evaluator.eval_worerank(query_loader,
                                         gallery_loader,
                                         dataset.query,
                                         dataset.gallery,
                                         metric=['euclidean'],
                                         types_list=feats_list,
                                         pathLogfile=pathLogfile)
    return score_dict[feat_metric_name]


def main(args, ckptDir, pathLogfile):
    """Build data loaders, model, losses and optimizer, then train or evaluate.

    Args:
        args: parsed CLI namespace (see ``do_arg_parse``).
        ckptDir: directory checkpoints are written into.
        pathLogfile: path of the run's text log file.
    """
    # display config hyper-params
    print(args)
    write_to_logfile(pathLogfile, f"{args}")

    # set random seed
    set_seed(args.seed)

    # set device
    if args.useGPU and torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    print("using device:", device)
    write_to_logfile(pathLogfile, f"using device: {device}")

    # get selected dataset
    print("Initializing dataset {}".format(args.dataset))
    dataset = get_dataset(args.dataset, args.root)

    # Training loader uses identity-based sampling: RandomIdentitySampler
    # draws `num_instances` images per identity (presumably required by the
    # triplet loss -- TODO confirm against RandomIdentitySampler's docs).
    train_loader = DataLoader(
        PersonImageDataset(dataset.train, transform=personTrainTransform),
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        sampler=RandomIdentitySampler(dataset.train, args.num_instances),
        pin_memory=True,
        drop_last=True,
    )

    # Query/gallery loaders: deterministic order, keep every sample.
    query_loader = DataLoader(
        PersonImageDataset(dataset.query, transform=personTestTransform),
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )

    gallery_loader = DataLoader(
        PersonImageDataset(dataset.gallery, transform=personTestTransform),
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=False,
        pin_memory=True,
        drop_last=False,
    )

    # model
    print("Initializing model: {}".format(args.arch))
    model = create_model(name=args.arch,
                         num_classes=dataset.num_train_pids,
                         height=args.height,
                         width=args.width,
                         pretrained=args.ImageNetPretrained,
                         num_feat=args.num_feat)

    # losses: cross entropy (optionally label-smoothed) for classification,
    # plus a hard-mining triplet loss for the metric branch
    if args.labelsmooth:
        criterion_class = CrossEntropyLabelSmoothLoss(
            num_classes=dataset.num_train_pids)
    else:
        criterion_class = torch.nn.CrossEntropyLoss()
    criterion_metric = TripletHardLoss(margin=0.3)
    losses = {"criterion_class": criterion_class,
              "criterion_metric": criterion_metric,
              }

    # optimizer
    # NOTE(review): args.optim is never consulted -- Adam is always used.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.LR, weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=args.upstep, gamma=args.gamma)

    # parallelize: wrap in DataParallel and move to the selected device
    model = torch.nn.DataParallel(model).to(device)

    # optionally load model weights from a checkpoint
    # NOTE: loading happens AFTER the DataParallel wrap, so the checkpoint's
    # state_dict keys are expected to carry the 'module.' prefix.
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
    else:
        start_epoch = 0
    print('start epoch:', start_epoch)

    # === For testing only ===
    if args.eval_only:
        print("Evaluate Only!")
        test(model, dataset, query_loader, gallery_loader, device, pathLogfile)
        return

    # === start training ===
    print("start training...")
    history_best_score = 0.0
    # NOTE(review): range(start_epoch+1, args.max_epoch) skips epoch 0 and
    # stops at max_epoch-1, so a fresh run performs max_epoch-1 epochs --
    # confirm this off-by-one is intended.
    for epoch in range(start_epoch+1, args.max_epoch):
        train(epoch, model, losses, optimizer, scheduler,
              train_loader, device, pathLogfile)

        # evaluate and checkpoint only every `eval_interval` epochs
        if (epoch+1) % args.eval_interval != 0:
            continue

        # do validation and save ckpt
        score = test(model, dataset, query_loader,
                     gallery_loader, device, pathLogfile)

        if score > history_best_score:
            print('best yield!')
            is_best = True
            history_best_score = score
        else:
            is_best = False

        current_lr = optimizer.state_dict()['param_groups'][0]['lr']

        save_checkpoint(
            stateDict={
                'state_dict': model.state_dict(),
                args.feat_metric_name: score,  # check name before run !!!
                'epoch': epoch,
                'LR': current_lr,
            },
            epoch=epoch,
            isBest=is_best,
            ckptDir=ckptDir)


if __name__ == '__main__':
    # Parse CLI options, create the per-run directories, then train/evaluate.
    # `args` is intentionally module-global: test() reads it as a fallback.
    args = do_arg_parse()
    ckptDir, pathLogfile = config_paths(args)
    main(args, ckptDir, pathLogfile)
