#!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
@author: xingwg
@license: (C) Copyright 2020-2025.
@contact: xingweiguo@chinasvt.com
@project: boya-reid
@file: train.py
@time: 2020/9/13 10:29
@desc:
"""
import os
import argparse
import random
import glog
import time
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
from torch.backends import cudnn
from torch.cuda.amp import (
    autocast,
    GradScaler,
)
from src.utils.meter import AverageMeter
from src.dataset import make_dataloader
from src.config import cfg
from src.model.make_model import make_model
from src.loss.make_loss import make_loss
from src.solver.make_optimizer import make_optimizer
from src.solver.lr_scheduler import WarmupMultiStepLR

try:
    from apex.fp16_utils import *
    from apex import amp, optimizers
    from apex.parallel import DistributedDataParallel as DDP
    from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")


def set_seed(seed):
    """Seed every RNG used in training so runs are reproducible.

    Seeds torch (CPU and all CUDA devices), numpy and the stdlib ``random``
    module, and configures cuDNN for deterministic execution.

    Args:
        seed (int): seed value applied to every generator.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Deterministic conv algorithms require benchmark=False: with
    # benchmark=True cuDNN auto-tunes an algorithm per input shape and may
    # pick a non-deterministic one, defeating the seeding above.
    # (The original set benchmark=True here, which contradicts
    # deterministic=True.)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


if __name__ == "__main__":
    # ---- CLI / configuration -------------------------------------------
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file",
        default="",
        help="path to config file.",
        type=str
    )
    # All remaining positional tokens are forwarded to cfg.merge_from_list,
    # so any config key can be overridden from the command line.
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line.",
        default=None,
        nargs=argparse.REMAINDER
    )

    args = parser.parse_args()

    # Config precedence: built-in defaults <- YAML file <- CLI overrides.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    set_seed(cfg.SOLVER.SEED)

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    glog.info("Saving model in the path: {}".format(output_dir))
    glog.info(args)

    # Echo the raw YAML and the fully merged config for experiment records.
    if args.config_file != "":
        glog.info("Load config file {}".format(args.config_file))
        with open(args.config_file, "r") as f:
            text = "\n" + f.read()
            glog.info(text)
    glog.info("Running with config:\n{}".format(cfg))

    # Must be set before any CUDA context is created to take effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID

    # ---- data / model / loss / optimizer -------------------------------
    train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)

    if cfg.MODEL.PRETRAIN_CHOICE == "finetune":
        model = make_model(cfg, num_class=num_classes)
        model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
        glog.info("Loading pretrained model for finetuning...")
    else:
        model = make_model(cfg, num_class=num_classes)

    loss_func, center_criterion = make_loss(cfg, num_classes=num_classes)

    # Separate optimizer for the center-loss centers (see the loop below).
    optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_EPOCHS, cfg.SOLVER.WARMUP_METHOD)

    glog.info("start training...")
    log_period = cfg.SOLVER.LOG_PERIOD
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD

    device = "cuda"
    # NOTE(review): `if device:` is always True for this non-empty string,
    # so the guard is effectively a no-op.
    if device:
        model.to(device)
        if torch.cuda.device_count() > 1:
            glog.info("Using {} GPUs for training".format(torch.cuda.device_count()))
            model = nn.DataParallel(model)
        # NOTE(review): because this is `elif`, apex amp is only initialized
        # on the single-GPU path — SOLVER.FP16 is silently ignored under
        # DataParallel. Confirm that is intended.
        elif cfg.SOLVER.FP16:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    # Instantiate a SummaryWriter to record the loss curves; its log_dir
    # doubles as the checkpoint directory below.
    model_name = cfg.MODEL.NAME + "_{}x{}_b{}_lr{}_epoch{}_{}{}_{}{}_std".format(
                        cfg.INPUT.SIZE_TRAIN[0],
                        cfg.INPUT.SIZE_TRAIN[1],
                        cfg.SOLVER.IMS_PER_BATCH,
                        cfg.SOLVER.BASE_LR,
                        cfg.SOLVER.MAX_EPOCHS,
                        cfg.MODEL.TRIPLET_LOSS_WEIGHT,
                        cfg.MODEL.METRIC_LOSS_TYPE,
                        cfg.MODEL.ID_LOSS_WEIGHT,
                        cfg.MODEL.ID_LOSS_TYPE,
                    )
    writer = SummaryWriter(log_dir=os.path.join(cfg.OUTPUT_DIR, model_name), comment=model_name)

    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    # ---- training loop -------------------------------------------------
    scaler = GradScaler()
    epochs = cfg.SOLVER.MAX_EPOCHS
    for epoch in range(1, epochs + 1):
        start_time = time.time()
        loss_meter.reset()
        acc_meter.reset()

        model.train()
        count = 0
        for idx, (imgs, pids) in enumerate(train_loader):
            optimizer.zero_grad()
            optimizer_center.zero_grad()
            imgs = imgs.to(device)
            target = pids.to(device)

            # score, feat = model(imgs, target)
            # loss = loss_func(score, feat, target)
            # Forward pass always runs under native autocast, even when the
            # apex FP16 branch below is taken.
            with autocast():
                score, feat = model(imgs, target)
                loss = loss_func(score, feat, target)

            # NOTE(review): this mixes apex amp with the native GradScaler.
            # On the SOLVER.FP16 path, scaler.scale() is never called, yet
            # scaler.step()/update() still run below — the grads get
            # unscaled by a scale factor that was never applied to the loss.
            # Confirm whether the apex path is still exercised/needed.
            if cfg.SOLVER.FP16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                # loss.backward()
                scaler.scale(loss).backward()

            # optimizer.step()
            scaler.step(optimizer)

            if "center" in cfg.MODEL.METRIC_LOSS_TYPE:
                # Undo the center-loss weight on the center grads so the
                # centers themselves are updated with an unweighted signal.
                for param in center_criterion.parameters():
                    param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
                # optimizer_center.step()
                scaler.step(optimizer_center)

            scaler.update()

            # NOTE(review): acc stays a GPU tensor here, so AverageMeter
            # accumulates tensors; calling .item() would be cheaper.
            acc = (score.max(1)[1] == target).float().mean()
            loss_meter.update(loss.item(), imgs.shape[0])
            acc_meter.update(acc, 1)

            if (idx + 1) % log_period == 0:
                # NOTE(review): scheduler.get_lr() is deprecated in newer
                # torch; get_last_lr() is the supported accessor.
                glog.info("Epoch[{}] Iteration[{}/{}] Loss: {:.6f}, Acc: {:.6f}, Base Lr: {:.6f}".format(
                    epoch,
                    idx + 1,
                    len(train_loader),
                    loss_meter.avg,
                    acc_meter.avg,
                    scheduler.get_lr()[0]
                ))

                # Global step = iteration index across all epochs.
                writer.add_scalar("Loss", loss_meter.avg, idx + 1 + len(train_loader) * (epoch - 1))
                writer.add_scalar("Acc", acc_meter.avg, idx + 1 + len(train_loader) * (epoch - 1))

            count += 1

        scheduler.step()

        end_time = time.time()
        # NOTE(review): divides by the number of iterations this epoch;
        # raises ZeroDivisionError if train_loader yields no batches.
        time_per_batch = (end_time - start_time) / count
        glog.info("Epoch[{}] done. Time per batch: {:.3f}[s] Speed: {:.2f}[samples/s]".format(
            epoch,
            time_per_batch,
            train_loader.batch_size / time_per_batch
        ))

        if epoch % checkpoint_period == 0:
            # Checkpoints are written next to the TensorBoard logs; the
            # SummaryWriter above has already created that directory.
            torch.save(
                model.state_dict(),
                os.path.join(
                    os.path.join(cfg.OUTPUT_DIR, model_name),
                    model_name + "_{}.pth".format(epoch)
                )
            )

    writer.close()
