import logging
import numpy as np
import os
import time
import torch
import torch.nn as nn
import cv2
from utils.meter import AverageMeter
from utils.metrics import R1_mAP, R1_mAP_Pseudo, eval_func
import json
import datetime

from tensorboardX import SummaryWriter  # 可视化loss和acc
import os
import time
from tqdm import tqdm

try:
    from torch.cuda import amp
except ImportError:
    pass

from model.RMGL import feat_channel


# try:
#     from apex.parallel import DistributedDataParallel as DDP
#     from apex.fp16_utils import *
#     from apex import amp, optimizers
#     from apex.multi_tensor_apply import multi_tensor_applier
# except ImportError:
#     raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")


def do_train(cfg,
             model,
             center_criterion,
             train_loader,
             val_loader_green,
             val_loader_normal,
             optimizer,
             optimizer_center,
             scheduler,
             loss_fn,
             num_query_green,
             num_query_normal,
             start_epoch=0,
             scaler=None
             ):
    """Run the training loop with periodic checkpointing and validation.

    Args:
        cfg: global config node (reads SOLVER / MODEL / TEST / OUTPUT_DIR).
        model: re-id network; ``model(img, target)`` returns an interleaved
            sequence ``[score_0, feat_0, score_1, feat_1, ...]`` (inferred
            from the accuracy loop below — confirm against the model).
        center_criterion: iterable of center-loss modules, used only when
            'center' appears in cfg.MODEL.METRIC_LOSS_TYPE.
        train_loader: DataLoader yielding (img, vid) batches.
        val_loader_green / val_loader_normal: validation DataLoaders.
        optimizer: main optimizer.
        optimizer_center: optimizer for the center-loss parameters.
        scheduler: LR scheduler, stepped once per epoch.
        loss_fn: callable(score_feat, target) -> scalar loss.
        num_query_green / num_query_normal: query counts used to weight the
            averaged validation metrics.
        start_epoch: resume point; training runs epochs start_epoch+1..MAX_EPOCHS.
        scaler: torch.cuda.amp.GradScaler — required when cfg.SOLVER.FP16.
    """
    # TensorBoard visualisation of loss / accuracy.
    if not os.path.exists(cfg.OUTPUT_DIR + '/log'):
        os.makedirs(cfg.OUTPUT_DIR + '/log')  # create the log folder
    log_output_dir = cfg.OUTPUT_DIR + '/log/' + cfg.MODEL.NAME  # per-model log folder
    weight_output_dir = cfg.OUTPUT_DIR + '/' + cfg.MODEL.NAME  # per-model weight folder
    Vis = SummaryWriter(log_output_dir)  # logs live next to the weight folders
    if not os.path.exists(weight_output_dir):
        os.makedirs(weight_output_dir)  # create the weight folder

    log_period = cfg.SOLVER.LOG_PERIOD
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    epochs = cfg.SOLVER.MAX_EPOCHS

    device = "cuda"
    logger = logging.getLogger("reid_baseline.train")
    logger.info('start training')

    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    # train
    for epoch in range(1 + start_epoch, epochs + 1):
        start_time = time.time()
        loss_meter.reset()
        acc_meter.reset()
        # Switch back to train mode every epoch (validation sets eval mode).
        model.train()
        for n_iter, (img, vid) in enumerate(train_loader):
            optimizer.zero_grad()
            optimizer_center.zero_grad()
            img = img.to(device)
            target = vid.to(device)

            if cfg.SOLVER.FP16:
                # Forward (model + loss) under autocast, scale the loss,
                # then step the main optimizer through the scaler.
                with amp.autocast():
                    score_feat = model(img, target)
                    loss = loss_fn(score_feat, target)

                scaler.scale(loss).backward()
                scaler.unscale_(optimizer)
                scaler.step(optimizer)
            else:
                score_feat = model(img, target)
                loss = loss_fn(score_feat, target)
                loss.backward()
                optimizer.step()

            if 'center' in cfg.MODEL.METRIC_LOSS_TYPE:
                for _center_criterion in center_criterion:
                    # Undo the loss weighting so the centers are updated
                    # with the unweighted gradient.
                    for param in _center_criterion.parameters():
                        param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
                if cfg.SOLVER.FP16:
                    scaler.step(optimizer_center)
                else:
                    optimizer_center.step()

            if cfg.SOLVER.FP16:
                # BUG FIX: update() previously ran only when center loss was
                # enabled. GradScaler.update() must run once per iteration,
                # after all scaler.step() calls, otherwise the loss scale is
                # never adjusted and overflowed steps keep being skipped.
                scaler.update()

            # score_feat interleaves (score, feat) pairs; average the ID
            # classification accuracy over all score heads.
            num_heads = len(score_feat) // 2
            acc = sum((score_feat[2 * i].max(1)[1] == target).float().mean()
                      for i in range(num_heads)) / num_heads

            loss_meter.update(loss.item(), img.shape[0])
            acc_meter.update(acc, 1)

            if (n_iter + 1) % log_period == 0:
                logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
                            .format(epoch, (n_iter + 1), len(train_loader),
                                    loss_meter.avg, acc_meter.avg, scheduler.get_lr()[0]))

        scheduler.step()
        end_time = time.time()
        time_per_epoch = (end_time - start_time)
        time_per_batch = time_per_epoch / (n_iter + 1)
        time_remaining = time_per_epoch * (epochs - epoch) / 60.
        logger.info(
            "Epoch {} done. Time per epoch: {:.1f}[min] Time Remaining: {:.1f}[min] Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
                .format(epoch, time_per_epoch / 60., time_remaining, time_per_batch,
                        train_loader.batch_size / time_per_batch))

        if epoch % checkpoint_period == 0:
            # Save a checkpoint (model weights only; optimizer state is not kept).
            print("save model: {} ......".format(cfg.MODEL.NAME + '_{}.pth'.format(epoch)))
            checkpoint = {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
            }
            torch.save(checkpoint, os.path.join(weight_output_dir, cfg.MODEL.NAME + '_{}.pth'.format(epoch)))

        if epoch % cfg.SOLVER.EVAL_PERIOD == 0:
            # Periodic validation on both splits.
            mAP_rank_green, mAP_rank_normal = do_validate(cfg, model, val_loader_green, val_loader_normal,
                                                          num_query_green, num_query_normal)
            # Weighted averages of mAP / rank-1 / rank-5 across the two
            # splits, weighted by their query counts.
            mAP_avg = (mAP_rank_green[0] * num_query_green + mAP_rank_normal[0] * num_query_normal) / (
                    num_query_green + num_query_normal)
            rank1_avg = (mAP_rank_green[1] * num_query_green + mAP_rank_normal[1] * num_query_normal) / (
                    num_query_green + num_query_normal)
            rank5_avg = (mAP_rank_green[2] * num_query_green + mAP_rank_normal[2] * num_query_normal) / (
                    num_query_green + num_query_normal)
            Vis.add_scalar('val/avg_mAP', mAP_avg, epoch)
            Vis.add_scalar('val/avg_rank-1', rank1_avg, epoch)
            Vis.add_scalar('val/avg_rank-5', rank5_avg, epoch)

            Vis.add_scalar('val/green_mAP', mAP_rank_green[0], epoch)
            Vis.add_scalar('val/green_rank-1', mAP_rank_green[1], epoch)
            Vis.add_scalar('val/green_rank-5', mAP_rank_green[2], epoch)
            Vis.add_scalar('val/normal_mAP', mAP_rank_normal[0], epoch)
            Vis.add_scalar('val/normal_rank-1', mAP_rank_normal[1], epoch)
            Vis.add_scalar('val/normal_rank-5', mAP_rank_normal[2], epoch)
            logger.info("Epoch {} done. 计算平均mAP rank:\n mAP: {:.3f}\n rank1: {:.3f}\n rank5: {:.3f}"
                        .format(epoch, mAP_avg, rank1_avg, rank5_avg))

        # Per-epoch training curves.
        Vis.add_scalar('train/epoch/loss', loss_meter.avg, epoch)
        Vis.add_scalar('train/epoch/acc', acc_meter.avg, epoch)
        Vis.add_scalar('train/epoch/learning_rate', scheduler.get_lr()[0], epoch)


def do_validate(cfg,
                model,
                val_loader_green,
                val_loader_normal,
                num_query_green,
                num_query_normal):
    """Evaluate the model on the 'green' and 'normal' validation splits.

    Returns:
        Two lists — [mAP, rank1, rank5] for the green split and the same for
        the normal split. A split whose loader yields no batches (green can
        be empty) reports zeros.
    """
    device = "cuda"
    "切换为预测模型"
    # Switch the model to evaluation mode.
    model.eval()
    # Index 0 = green split, index 1 = normal split.
    val_loader = [val_loader_green, val_loader_normal]
    for index, loader in enumerate(val_loader):
        "green和normal的 reranking参数不同"
        # The two splits use different re-ranking hyper-parameters
        # (presumably [k1, k2, lambda] — confirm against R1_mAP.compute).
        if index == 0:
            subfix = '1'
            reranking_parameter = [14, 4, 0.4]
            evaluator = R1_mAP(num_query_green, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        else:
            subfix = '2'
            reranking_parameter = [10, 3, 0.6]
            evaluator = R1_mAP(num_query_normal, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)

        evaluator.reset()
        "分别保存green和normal的距离矩阵？？？"
        # (Saving the per-split distance matrices is currently disabled.)
        # DISTMAT_PATH = os.path.join(cfg.OUTPUT_DIR, "distmat_{}.npy".format(subfix))
        # QUERY_PATH = os.path.join(cfg.OUTPUT_DIR, "query_path_{}.npy".format(subfix))
        # GALLERY_PATH = os.path.join(cfg.OUTPUT_DIR, "gallery_path_{}.npy".format(subfix))
        have_data = False  # tracks whether the loader produced any batch (green may be empty)
        for n_iter, (img, pid, camid, imgpath) in tqdm(enumerate(loader)):
            "循环读入所有的数据，先读入所有query，再读取所有的gallery，逐个计算feat"
            # Iterate over all data — all queries first, then all gallery
            # images — computing features batch by batch.
            have_data = True
            with torch.no_grad():
                img = img.to(device)
                if cfg.TEST.FLIP_FEATS == 'on':
                    # Sum the features of the original image and its
                    # horizontal flip (flip along the width dim, dim 3).
                    feat = torch.FloatTensor(img.size(0), feat_channel).zero_().cuda()
                    for i in range(2):
                        "图片反转的特征+原图特征"
                        if i == 1:
                            inv_idx = torch.arange(img.size(3) - 1, -1, -1).long().cuda()
                            img = img.index_select(3, inv_idx)
                        f = model(img)
                        feat = feat + f
                else:
                    feat = model(img)

                evaluator.update((feat, imgpath, pid, camid))

        mAP, rank1, rank5 = 0, 0, 0
        data = dict()
        if have_data:
            # data: query -> matches mapping; distmat: query-gallery distances.
            data, distmat, img_name_q, img_name_g = evaluator.compute(reranking_parameter)
            cmc, mAP = eval_func(distmat, evaluator.q_pids, evaluator.g_pids, evaluator.q_camids, evaluator.g_camids,
                                 200)
            rank1, rank5 = cmc[0], cmc[4]

        if index == 0:
            data_1 = data
            mAP_green, rank1_green, rank5_green = mAP, rank1, rank5
        else:
            mAP_normal, rank1_normal, rank5_normal = mAP, rank1, rank5
    "green 和 normal的匹配结果拼接，作为最终的匹配结果"
    # Merge the green and normal match results into the final result.
    # NOTE(review): data_all is unused now that the JSON dump below is
    # commented out — kept for symmetry with do_inference.
    data_all = {**data_1, **data}
    # nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    # with open(os.path.join(cfg.OUTPUT_DIR, 'result_{}.json'.format(nowTime)), 'w',encoding='utf-8') as fp:
    #     json.dump(data_all, fp)
    return [mAP_green, rank1_green, rank5_green], [mAP_normal, rank1_normal, rank5_normal]


def do_inference(cfg,
                 model,
                 val_loader_green,
                 val_loader_normal,
                 num_query_green,
                 num_query_normal):
    """Run test-time inference on both splits and dump the match results.

    Side effects: saves per-split distance matrices and query/gallery path
    arrays as .npy files, and writes the merged match results to a
    timestamped JSON file under OUTPUT_DIR/MODEL.NAME.
    """
    device = "cuda"
    logger = logging.getLogger("reid_baseline.test")
    logger.info("Enter inferencing")
    json_save_path = cfg.OUTPUT_DIR + '/' + cfg.MODEL.NAME
    if not os.path.exists(json_save_path):
        os.makedirs(json_save_path)  # create the output folder

    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)

    model.eval()
    # Index 0 = green split, index 1 = normal split.
    val_loader = [val_loader_green, val_loader_normal]
    for index, loader in enumerate(val_loader):
        "green和normal的 reranking参数不同"
        # The two splits can use different re-ranking hyper-parameters.
        if index == 0:
            subfix = '1'
            # NOTE(review): green currently uses the normal split's
            # parameters; the original green values are kept in the
            # trailing comment.
            reranking_parameter = [10, 3, 0.6]#[14, 4, 0.4]
            evaluator = R1_mAP(num_query_green, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)
        else:
            subfix = '2'
            reranking_parameter = [10, 3, 0.6]
            evaluator = R1_mAP(num_query_normal, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM,
                               reranking=cfg.TEST.RE_RANKING)

        evaluator.reset()
        "分别保存green和normal的距离矩阵？？？"
        # Per-split output paths for the distance matrix and image lists.
        DISTMAT_PATH = os.path.join(json_save_path, "distmat_{}.npy".format(subfix))
        QUERY_PATH = os.path.join(json_save_path, "query_path_{}.npy".format(subfix))
        GALLERY_PATH = os.path.join(json_save_path, "gallery_path_{}.npy".format(subfix))

        have_data = False  # green may be empty; computing mAP on no data would fail
        print("calculate feature for test data ")
        for n_iter, (img, pid, camid, imgpath) in tqdm(enumerate(loader)):
            "一次性读入所有的数据"
            # Stream every batch through the model, accumulating features
            # in the evaluator.
            have_data = True
            with torch.no_grad():
                img = img.to(device)
                if cfg.TEST.FLIP_FEATS == 'on':
                    # Sum the features of the original image and its
                    # horizontal flip (flip along the width dim, dim 3).
                    feat = torch.FloatTensor(img.size(0), feat_channel).zero_().cuda()
                    for i in range(2):
                        "图片反转的特征+原图特征"
                        if i == 1:
                            inv_idx = torch.arange(img.size(3) - 1, -1, -1).long().cuda()
                            img = img.index_select(3, inv_idx)
                        f = model(img)
                        feat = feat + f
                else:
                    feat = model(img)

                evaluator.update((feat, imgpath, pid, camid))

        data = dict()
        if have_data:
            # data: query -> matches mapping; distmat: query-gallery distances.
            data, distmat, img_name_q, img_name_g = evaluator.compute(reranking_parameter)
            np.save(DISTMAT_PATH, distmat)
            np.save(QUERY_PATH, img_name_q)
            np.save(GALLERY_PATH, img_name_g)

        if index == 0:
            data_1 = data
    "green 和 normal的匹配结果拼接，作为最终的匹配结果"
    # Merge the green and normal match results into the final result and
    # dump them to a timestamped JSON file.
    data_all = {**data_1, **data}
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    with open(os.path.join(json_save_path, 'result_{}.json'.format(nowTime)), 'w', encoding='utf-8') as fp:
        json.dump(data_all, fp)


def do_inference_Pseudo(cfg,
                        model,
                        val_loader,
                        num_query
                        ):
    """Extract features for unsupervised pseudo-label generation.

    Runs the model over ``val_loader`` (optionally averaging in the
    horizontally-flipped feature when cfg.TEST.FLIP_FEATS == 'on') and
    computes the re-ranked distance matrix.

    :param cfg: global config node (reads TEST.FEAT_NORM / TEST.FLIP_FEATS).
    :param model: feature-extraction network.
    :param val_loader: DataLoader yielding (img, pid, camid, imgpath).
    :param num_query: number of query images at the front of the loader.
    :return: (distmat, img_name_q, img_name_g) from the evaluator.
    """
    device = "cuda"

    evaluator = R1_mAP_Pseudo(num_query, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM)
    evaluator.reset()

    if device:
        # Spread inference over all visible GPUs when more than one exists.
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)

    # Re-ranking hyper-parameters passed straight through to the evaluator.
    reranking_parameter = [14, 4, 0.4]

    model.eval()
    for batch in val_loader:
        img, pid, camid, imgpath = batch
        with torch.no_grad():
            img = img.to(device)
            if cfg.TEST.FLIP_FEATS == 'on':
                # Accumulate the features of the original image and of its
                # horizontal flip (flip along the width dimension, dim 3).
                feat = torch.zeros(img.size(0), feat_channel, device=device)
                for flipped in (False, True):
                    if flipped:
                        img = torch.flip(img, dims=[3])
                    feat = feat + model(img)
            else:
                feat = model(img)

            evaluator.update((feat, imgpath, pid, camid))

    distmat, img_name_q, img_name_g = evaluator.compute(reranking_parameter)

    return distmat, img_name_q, img_name_g
