from __future__ import print_function, division
import os
import argparse
import logging
import numpy as np
from pathlib import Path
from ptflops import get_model_complexity_info
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from core.igev_stereo import IGEVStereo
from evaluate_stereo import *
import core.stereo_datasets as datasets
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from core.utils import config

try:
    from torch.cuda.amp import GradScaler
except ImportError:  # older PyTorch builds without AMP support
    class GradScaler:
        """No-op stand-in for ``torch.cuda.amp.GradScaler``.

        Mirrors the subset of the real API this script uses.  Accepts the
        same ``enabled`` keyword as the real class so the call site
        ``GradScaler(enabled=args.mixed_precision)`` keeps working when
        AMP is unavailable (the original fallback took no arguments and
        would raise TypeError there).
        """
        def __init__(self, enabled=False, **kwargs):
            # `enabled` is ignored: without AMP there is nothing to scale.
            self.enabled = enabled
        def scale(self, loss):
            # Pass the loss through unscaled.
            return loss
        def unscale_(self, optimizer):
            pass
        def step(self, optimizer):
            optimizer.step()
        def update(self):
            pass

# Restrict this process to GPUs 0 and 1 (matches the nn.DataParallel wrap in train()).
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'

def sequence_loss(disp_preds, disp_init_pred, disp_gt, valid, loss_gamma=0.9, max_disp=192):
    """Compute the training loss over a sequence of disparity predictions.

    total = SmoothL1(initial disparity) +
            sum_i gamma_adj**(N-1-i) * L1(i-th ConvGRU refinement),
    i.e. later refinement iterations receive exponentially larger weights.

    Args:
        disp_preds: list of N per-iteration (ConvGRU) disparity maps, each (B, 1, H, W).
        disp_init_pred: initial context-upsampled disparity, (B, 1, H, W).
        disp_gt: ground-truth disparity, (B, 1, H, W).
        valid: validity mask, (B, H, W); pixels with value >= 0.5 are labelled.
        loss_gamma: base decay factor for the per-iteration weights.
        max_disp: pixels whose ground truth reaches this value are excluded.

    Returns:
        (disp_loss, metrics): scalar loss tensor and a dict with the EPE and
        1/3/5-px inlier rates of the final prediction.
    """
    n_predictions = len(disp_preds)  # number of ConvGRU refinement iterations
    assert n_predictions >= 1
    mag = torch.sum(disp_gt**2, dim=1).sqrt()  # ground-truth disparity magnitude
    # Keep only labelled pixels whose ground truth is below max_disp.
    valid = ((valid >= 0.5) & (mag < max_disp)).unsqueeze(1)
    assert valid.shape == disp_gt.shape, [valid.shape, disp_gt.shape]
    assert not torch.isinf(disp_gt[valid.bool()]).any()
    valid_mask = valid.bool()  # hoisted: reused for every prediction below

    # SmoothL1 loss on the initial (pre-refinement) disparity.
    disp_loss = 1.0 * F.smooth_l1_loss(disp_init_pred[valid_mask], disp_gt[valid_mask], reduction='mean')

    # Loop-invariant, so computed once.  max(..., 1) guards n_predictions == 1,
    # which previously caused a ZeroDivisionError in the exponent.
    adjusted_loss_gamma = loss_gamma**(15 / max(n_predictions - 1, 1))
    for i in range(n_predictions):
        i_weight = adjusted_loss_gamma**(n_predictions - i - 1)  # exponentially increasing weight
        i_loss = (disp_preds[i] - disp_gt).abs()  # per-pixel L1 for iteration i
        assert i_loss.shape == valid.shape, [i_loss.shape, valid.shape, disp_gt.shape, disp_preds[i].shape]
        disp_loss += i_weight * i_loss[valid_mask].mean()

    # End-point error of the final prediction, restricted to valid pixels.
    epe = torch.sum((disp_preds[-1] - disp_gt)**2, dim=1).sqrt()
    epe = epe.view(-1)[valid.view(-1)]

    metrics = {  # training metrics to log
        'epe': epe.mean().item(),
        '1px': (epe < 1).float().mean().item(),
        '3px': (epe < 3).float().mean().item(),
        '5px': (epe < 5).float().mean().item(),
    }
    return disp_loss, metrics

def fetch_optimizer(args, model):
    """Create the AdamW optimizer and its OneCycle learning-rate schedule.

    The schedule warms up over the first 1% of ``args.num_steps`` to the
    peak rate ``args.lr``, then anneals linearly; momentum cycling is off.
    """
    # AdamW: peak LR and weight decay from args; eps for numerical stability.
    opt = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.wdecay,
        eps=1e-8,
    )
    # One-cycle schedule spanning the whole training run.
    sched = optim.lr_scheduler.OneCycleLR(
        opt, args.lr, args.num_steps,
        pct_start=0.01, cycle_momentum=False, anneal_strategy='linear',
    )
    return opt, sched

class Logger:
    """Accumulates per-step training metrics and flushes window averages to
    TensorBoard every ``SUM_FREQ`` steps.

    NOTE(review): the log directory comes from the module-global ``args``,
    not from a constructor argument — confirm this is intentional.
    """

    SUM_FREQ = 100  # reporting window, in training steps

    def __init__(self, model, scheduler):
        self.model = model
        self.scheduler = scheduler
        self.total_steps = 0
        self.running_loss = {}  # metric name -> sum over the current window
        self.writer = SummaryWriter(log_dir=args.logdir)

    def _print_training_status(self):
        # Average each accumulated metric over the reporting window.
        keys = sorted(self.running_loss.keys())
        averages = [self.running_loss[k] / Logger.SUM_FREQ for k in keys]
        training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps + 1, self.scheduler.get_last_lr()[0])
        metrics_str = ("{:10.4f}, " * len(averages)).format(*averages)

        # Emit the current training status to the log.
        logging.info(f"Training Metrics ({self.total_steps}): {training_str + metrics_str}")

        if self.writer is None:
            self.writer = SummaryWriter(log_dir=args.logdir)

        for key in self.running_loss:
            self.writer.add_scalar(key, self.running_loss[key] / Logger.SUM_FREQ, self.total_steps)
            self.running_loss[key] = 0.0

    def push(self, metrics):
        """Accumulate one step's metrics; flush when the window fills."""
        self.total_steps += 1
        for key, value in metrics.items():
            self.running_loss[key] = self.running_loss.get(key, 0.0) + value
        if self.total_steps % Logger.SUM_FREQ == Logger.SUM_FREQ - 1:
            self._print_training_status()
            self.running_loss = {}

    def write_dict(self, results):
        """Write a dict of scalars (e.g. validation results) straight to TensorBoard."""
        if self.writer is None:
            self.writer = SummaryWriter(log_dir=args.logdir)
        for key, value in results.items():
            self.writer.add_scalar(key, value, self.total_steps)

    def close(self):
        self.writer.close()

def train(args):
    """Run the full training loop: data loading, optimization, per-epoch
    validation, and checkpointing of the best model.

    Args:
        args: parsed command-line namespace (see the ``__main__`` block).

    Returns:
        The path where the final checkpoint would be written (the actual
        save at the end is currently commented out).
    """
    model = nn.DataParallel(IGEVStereo(args))
    print("Parameter Count: %d" % count_parameters(model))
    if 'usvinland' in args.dataset:
        # Measure model complexity (MACs / parameter count) with ptflops.
        prepare_input = lambda _: {"image1":  torch.FloatTensor(1, 3, 320, 640).to('cuda'), "image2":  torch.FloatTensor(1, 3, 320, 640).to('cuda')}
        macs, params = get_model_complexity_info(model.module, input_res=(3, 320, 640), input_constructor=prepare_input, print_per_layer_stat=False, verbose=False)
        print(f'ptflops: {{ macs: {macs}, params: {params} }}')

    train_loader = datasets.fetch_dataloader(args)  # training dataloader (upstream code trains on all samples without a split)
    optimizer, scheduler = fetch_optimizer(args, model)  # optimizer and LR scheduler
    total_steps = 0
    logger = Logger(model, scheduler)

    if args.restore_ckpt is not None:  # load pretrained weights
        assert args.restore_ckpt.endswith(".pth")
        logging.info("Loading checkpoint...")
        checkpoint = torch.load(args.restore_ckpt)
        model.load_state_dict(checkpoint, strict=True)
        logging.info(f"Done loading checkpoint")
    model.cuda()
    model.train()
    model.module.freeze_bn()  # keep BatchNorm frozen while fine-tuning so pretrained statistics are preserved

    validation_frequency = len(train_loader)  # validate once per epoch

    # Loss scaler for mixed-precision training; `enabled` toggles it.
    # NOTE(review): if the torch.cuda.amp import at the top of this file fails,
    # the fallback GradScaler defined there takes no arguments, so this call
    # would raise TypeError — confirm.
    scaler = GradScaler(enabled=args.mixed_precision)

    should_keep_training = True
    global_batch_num = 0
    error_min = np.inf  # best (lowest) validation error seen so far
    while should_keep_training:

        for i_batch, (_, _, *data_blob) in enumerate(tqdm(train_loader)):
            optimizer.zero_grad()  # clear gradients each batch to avoid wrong accumulation
            image1, image2, disp_gt, valid = [x.cuda() for x in data_blob]  # disp_gt: ground truth; valid: disparity mask (float)

            assert model.training
            # Forward pass: initial context-upsampled disparity plus the list
            # of per-iteration (ConvGRU) upsampled disparities.
            disp_init_pred, disp_preds = model(image1, image2, iters=args.train_iters)
            assert model.training

            # Sequence loss over the initial prediction and all refinement iterations.
            loss, metrics = sequence_loss(disp_preds, disp_init_pred, disp_gt, valid, max_disp=args.max_disp)
            logger.writer.add_scalar("live_loss", loss.item(), global_batch_num)
            logger.writer.add_scalar(f'learning_rate', optimizer.param_groups[0]['lr'], global_batch_num)
            global_batch_num += 1
            scaler.scale(loss).backward()  # backward pass (loss scaled for AMP)
            scaler.unscale_(optimizer)  # unscale gradients before clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # gradient clipping, max norm 1.0

            scaler.step(optimizer)  # optimizer step (skipped by AMP on inf/nan grads)
            scheduler.step()  # advance the LR schedule
            scaler.update()
            logger.push(metrics)

            # Validate and checkpoint at the end of each epoch.
            if i_batch == len(train_loader) - 1:
                # Dispatch to the dataset-specific validation routine.
                if 'sceneflow' in args.dataset:
                    results = validate_sceneflow(model.module, iters=args.valid_iters)
                    error = results['scene-disp-d1']
                elif 'kitti' in args.dataset:
                    results = validate_kitti(args, model.module, iters=args.valid_iters)  # args passed so the loader runs in validation mode
                    error = results['avg_error']
                elif 'usvinland' in args.dataset:
                    results = validate_usvinland(args, model.module, iters=args.valid_iters)
                    error = results['avg_error']
                else:
                    raise Exception('Unknown validation dataset:', args.dataset)

                # Keep only the checkpoint with the lowest average error.
                if error_min > error:
                    error_min = error
                    best_path = Path(args.ckptdir + '/%d_%s.pth' % (total_steps + 1, args.name))
                    logging.info(f"Saving file {best_path.absolute()}")

                    # NOTE(review): for the sceneflow branch only
                    # 'scene-disp-d1' is read above; the 'avg_error' /
                    # 'error_*px' keys below are not shown to exist in its
                    # results and would raise KeyError — verify against
                    # evaluate_stereo.
                    state = {}
                    state['state_dict'] = model.state_dict()
                    state['epoch'] = total_steps // validation_frequency
                    state['step'] = total_steps + 1
                    state['vali_avg_error'] = results['avg_error']
                    state['vali_error_1px'] = results['error_1px']
                    state['vali_error_2px'] = results['error_2px']
                    state['vali_error_3px'] = results['error_3px']
                    state['vali_error_4px'] = results['error_4px']
                    state['vali_error_5px'] = results['error_5px']

                    torch.save(state, best_path)

                logger.write_dict(results)
                model.train()
                model.module.freeze_bn()  # re-freeze BN after validation switched modes

            total_steps += 1

            if total_steps > args.num_steps:  # stop after the scheduled number of steps (default 200k)
                should_keep_training = False
                break

            # NOTE(review): hard-coded 10k-step experiment cap — this
            # overrides --num_steps whenever num_steps > 10000; confirm it is
            # intentional and not leftover debug code.
            if total_steps > 10000:
                should_keep_training = False
                break

        # For large datasets, also save a checkpoint after each pass over the loader.
        if len(train_loader) >= 10000:
            save_path = Path(args.ckptdir + '/%d_epoch_%s.pth.gz' % (total_steps + 1, args.name))
            logging.info(f"Saving file {save_path}")
            torch.save(model.state_dict(), save_path)

    print("FINISHED TRAINING")
    logger.close()
    PATH = args.ckptdir + '/%s.pth' % args.name
    # torch.save(model.state_dict(), PATH)

    return PATH

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='igev-stereo', help="name your experiment")
    parser.add_argument('--restore_ckpt', default='./pretrained_models/sceneflow/sceneflow.pth', help="load the weights from a specific checkpoint")
    # NOTE(review): default=True combined with action='store_true' means this
    # flag is always True and cannot be disabled from the command line.
    parser.add_argument('--mixed_precision', default=True, action='store_true', help='use mixed precision')
    parser.add_argument('--logdir', default='./logs', help='the directory to save logs')
    parser.add_argument('--ckptdir', default='./checkpoints', help='the directory to save checkpoints')

    # Training parameters
    parser.add_argument('--batch_size', type=int, default=1, help="batch size used during training")
    parser.add_argument('--dataset', default='sceneflow', help="training datasets", choices=["eth3d", "kitti", "sceneflow", "usvinland", "usvinland_seg"] + [f"middlebury_{s}" for s in 'FHQ'])
    parser.add_argument('--mode', help="dataset mode for evaluate", default='validate', choices=["train", "validate", "test"])
    parser.add_argument('--kfold', type=int, default=0, help='Kfold num', choices=range(6))  # 5-fold cross-validation; 0 disables it
    parser.add_argument('--lr', type=float, default=0.0002, help="max learning rate")
    parser.add_argument('--num_steps', type=int, default=200000, help="length of training schedule")
    parser.add_argument('--image_size', type=int, nargs='+', default=[320, 736], help="size of the random image crops used during training")
    parser.add_argument('--train_iters', type=int, default=22, help="number of updates to the disparity field in each forward pass")
    parser.add_argument('--wdecay', type=float, default=.00001, help="Weight decay in optimizer")

    # Validation parameters
    parser.add_argument('--valid_iters', type=int, default=32, help='number of flow-field updates during validation forward pass')

    # Architecure choices
    parser.add_argument('--corr_implementation', choices=["reg", "alt", "reg_cuda", "alt_cuda"], default="reg", help="correlation volume implementation")
    parser.add_argument('--shared_backbone', action='store_true', help="use a single backbone for the context and feature encoders")
    parser.add_argument('--corr_levels', type=int, default=2, help="number of levels in the correlation pyramid")  # correlation pyramid levels; default 2
    parser.add_argument('--corr_radius', type=int, default=4, help="width of the correlation pyramid")  # correlation pyramid width; default 4
    parser.add_argument('--n_downsample', type=int, default=2, help="resolution of the disparity field (1/2^K)")  # downsampling level, to 1/2^K resolution; default 2
    parser.add_argument('--slow_fast_gru', action='store_true', help="iterate the low-res GRUs more frequently")  # boolean; default False
    parser.add_argument('--n_gru_layers', type=int, default=3, help="number of hidden GRU levels")  # GRU layer count; default 3
    parser.add_argument('--hidden_dims', nargs='+', type=int, default=[128]*3, help="hidden state and context dimensions")  # GRU hidden-state dims; default [128, 128, 128]
    parser.add_argument('--max_disp', type=int, default=192, help="max disp of geometry encoding volume")

    # Data augmentation
    parser.add_argument('--img_gamma', type=float, nargs='+', default=None, help="gamma range")
    parser.add_argument('--saturation_range', type=float, nargs='+', default=[0, 1.4], help='color saturation')
    # NOTE(review): default False is not in choices; argparse does not validate
    # defaults, so the default simply means "no flip".
    parser.add_argument('--do_flip', default=False, choices=['h', 'v'], help='flip the images horizontally or vertically')
    parser.add_argument('--spatial_scale', type=float, nargs='+', default=[-0.2, 0.4], help='re-scale the images randomly')
    parser.add_argument('--noyjitter', action='store_true', help='don\'t simulate imperfect rectification')
    args = parser.parse_args()

    # Fixed seeds for reproducibility.
    torch.manual_seed(666)
    np.random.seed(666)

    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')

    # Ensure the output directories exist before training starts.
    Path(args.logdir).mkdir(exist_ok=True, parents=True)
    Path(args.ckptdir).mkdir(exist_ok=True, parents=True)

    config.kfold = args.kfold  # propagate the fold index to the shared config module

    train(args)
