# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

# --------------------------------------------------------
# Main training function
# --------------------------------------------------------

import argparse
import datetime
import json
import numpy as np
import os
import sys
import time
from ptflops import get_model_complexity_info
sys.path.append(".")

import torch
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader

from utils import config
import utils.misc as misc
from utils.misc import NativeScalerWithGradNormCount as NativeScaler
from models.croco_downstream import CroCoDownstreamBinocular, croco_args_from_ckpt
from models.pos_embed import interpolate_pos_embed
from models.head_downstream import PixelwiseTaskWithDPT

from stereoflow.datasets_stereo import get_train_dataset_stereo, get_test_datasets_stereo
from stereoflow.datasets_flow import get_train_dataset_flow, get_test_datasets_flow
from stereoflow.engine import train_one_epoch, validate_one_epoch
from stereoflow.criterion import *

# Pin this run to the first two GPUs. Set at import time, before any CUDA work
# happens below. NOTE(review): the value contains a space ('0, 1'); it appears
# to be tolerated, but the conventional form is '0,1' — confirm.
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'

def get_args_parser():
    """Build the CLI parser with 'stereo' and 'flow' subcommands.

    Both subcommands expose the same argument set; per-task defaults are
    injected through the local ``add_arg`` helper, which registers each
    argument on both subparsers.

    Returns:
        argparse.ArgumentParser: parser whose parsed namespace carries a
        ``task`` attribute ('stereo' or 'flow') plus all training options.
    """
    parser = argparse.ArgumentParser('Finetuning CroCo models on stereo or flow', add_help=False)
    subparsers = parser.add_subparsers(title="Task (stereo or flow)", dest="task", required=True)
    parser_stereo = subparsers.add_parser('stereo', help='Training stereo model')
    parser_flow = subparsers.add_parser('flow', help='Training flow model')

    def add_arg(name_or_flags, default=None, default_stereo=None, default_flow=None, **kwargs):
        # Register the same argument on both subparsers; `default` is shared,
        # while default_stereo/default_flow set task-specific values.
        # Use an explicit exception rather than `assert`: asserts are stripped
        # under `python -O`, which would silently allow the conflicting call.
        if default is not None and (default_stereo is not None or default_flow is not None):
            raise ValueError("setting default makes default_stereo and default_flow disabled")
        parser_stereo.add_argument(name_or_flags, default=default if default is not None else default_stereo, **kwargs)
        parser_flow.add_argument(name_or_flags, default=default if default is not None else default_flow, **kwargs)

    # output dir
    add_arg('--output_dir', required=True, type=str, help='path where to save, if empty, automatically created')  # tensorboard logs and checkpoints are written here
    # model
    add_arg('--crop', type=int, nargs='+', default_stereo=[352, 704], default_flow=[320, 384], help="size of the random image crops used during training.")
    add_arg('--pretrained', type=str, default='pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth', help="Load pretrained model (required as croco arguments come from there)")  # pretrained checkpoint; the CroCo hyperparameters are read from it
    # criterion
    add_arg('--criterion', default_stereo='LaplacianLossBounded2()', default_flow='LaplacianLossBounded()', type=str, help='string to evaluate to get criterion')  # stereo uses the LaplacianLossBounded2 loss
    add_arg('--bestmetric', default_stereo='avgerr', default_flow='EPE', type=str)
    # dataset
    add_arg('--dataset', type=str, required=True, help="training set")
    add_arg('--kfold', type=int, default=0, help='Kfold num', choices=range(6))  # 5-fold cross-validation; 0 disables it
    # training
    add_arg('--seed', default=0, type=int, help='seed')
    add_arg('--batch_size', default_stereo=1, default_flow=1, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')  # effective batch = batch_size * accum_iter * world size
    add_arg('--epochs', default=32, type=int, help='number of training epochs')
    add_arg('--img_per_epoch', type=int, default=None, help='Fix the number of images seen in an epoch (None means use all training pairs)')  # samples per epoch
    add_arg('--accum_iter', default=1, type=int, help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')  # gradient-accumulation steps
    add_arg('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')  # weight decay (regularization)
    add_arg('--lr', type=float, default_stereo=3e-5, default_flow=2e-5, metavar='LR', help='learning rate (absolute lr)')
    add_arg('--min_lr', type=float, default=0., metavar='LR', help='lower lr bound for cyclic schedulers that hit 0')
    add_arg('--warmup_epochs', type=int, default=1, metavar='N', help='epochs to warmup LR')  # number of LR warmup epochs
    add_arg('--optimizer', default='AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))', type=str, help="Optimizer from torch.optim [ default: AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95)) ]")  # optimizer expression, eval'd in main()
    add_arg('--amp', default=0, type=int, choices=[0, 1], help='enable automatic mixed precision training')
    # validation
    add_arg('--val_dataset', type=str, default='', help="Validation sets, multiple separated by + (empty string means that no validation is performed)")
    add_arg('--tile_conf_mode', type=str, default_stereo='conf_expsigmoid_15_3', default_flow='conf_expsigmoid_10_5', help='Weights for tile aggregation')  # tile-aggregation weighting used at inference
    add_arg('--val_overlap', default=0.7, type=float, help='Overlap value for the tiling')
    # others
    add_arg('--num_workers', default=0, type=int)  # num_workers=0 for Windows compatibility
    add_arg('--eval_every', type=int, default=1, help='Val loss evaluation frequency')  # evaluate every eval_every epochs
    add_arg('--save_every', type=int, default=1, help='Save checkpoint frequency')  # save every save_every epochs; best weights saved too
    add_arg('--start_from', type=str, default=None, help='Start training using weights from an other model (eg for finetuning)')
    add_arg('--tboard_log_step', type=int, default=1, help='Log to tboard every so many steps')  # tensorboard logging step interval
    add_arg('--dist_url', default='env://', help='url used to set up distributed training')

    return parser

def main(args: argparse.Namespace) -> None:
    """Finetune a CroCo binocular model on a stereo or flow dataset.

    Expects the namespace produced by ``get_args_parser()``. Loads the
    pretrained checkpoint, builds the DPT head and downstream model,
    restores or resumes training state, then runs the epoch loop with
    optional periodic validation and checkpointing.
    """
    config.kfold = args.kfold

    # Initialize distributed mode (sets args.distributed, args.gpu, ...).
    misc.init_distributed_mode(args)
    global_rank = misc.get_rank()
    num_tasks = misc.get_world_size()

    assert os.path.isfile(args.pretrained)
    print("output_dir: "+args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)

    # fix the seed for reproducibility (offset by rank so processes differ)
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True

    # Metrics / criterion for the selected task.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    metrics = (StereoMetrics if args.task=='stereo' else FlowMetrics)().to(device) # task-specific evaluation metrics
    # NOTE(review): eval() of a CLI-provided string — safe only for trusted invocations.
    criterion = eval(args.criterion).to(device) # e.g. LaplacianLossBounded2() for stereo
    print('Criterion: ', args.criterion)

    # Prepare model
    assert os.path.isfile(args.pretrained) # (duplicate of the check above; harmless)
    ckpt = torch.load(args.pretrained, 'cpu') # load pretrained checkpoint on CPU
    croco_args = croco_args_from_ckpt(ckpt) # extract model hyperparameters from the checkpoint
    croco_args['img_size'] = (args.crop[0], args.crop[1]) # override img_size with the finetuning crop size
    print('Croco args: '+str(croco_args))
    args.croco_args = croco_args # saved for test time
    # prepare head: 1 output channel for stereo (disparity), 2 for flow (u, v)
    num_channels = {'stereo': 1, 'flow': 2}[args.task]
    if criterion.with_conf: num_channels += 1 # one extra channel when the criterion predicts a confidence map
    print(f'Building head PixelwiseTaskWithDPT() with {num_channels} channel(s)')
    head = PixelwiseTaskWithDPT() # dense-prediction (DPT) head for the pixelwise downstream task
    head.num_channels = num_channels
    # build model and load pretrained weights
    model = CroCoDownstreamBinocular(head, **croco_args) # downstream binocular model wrapping the DPT head
    interpolate_pos_embed(model, ckpt['model']) # resize positional embeddings in the checkpoint to the finetuning resolution
    msg = model.load_state_dict(ckpt['model'], strict=False)
    print(msg)

    total_params = sum(p.numel() for p in model.parameters())
    total_params_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Total params: {total_params}")
    print(f"Total params trainable: {total_params_trainable}")
    if 'USVInland' in args.dataset:
        # Report model complexity via ptflops, on a fixed 320x640 binocular input.
        prepare_input = lambda _: {'img1': torch.FloatTensor(1, 3, 320, 640), 'img2': torch.FloatTensor(1, 3, 320, 640)}
        macs, params = get_model_complexity_info(model, input_res=(3, 320, 640), input_constructor=prepare_input, print_per_layer_stat=False, verbose=False)
        print(f'ptflops: {{ macs: {macs}, params: {params} }}')
    model_without_ddp = model.to(device) # model_without_ddp aliases model (kept unwrapped for state_dict I/O)

    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() # effective batch = per-GPU batch * grad-accum steps * world size
    print("lr: %.2e" % args.lr)
    print("accumulate grad iterations: %d" % args.accum_iter)
    print("effective batch size: %d" % eff_batch_size)

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], static_graph=True)
        model_without_ddp = model.module
    else: # non-distributed: fall back to DataParallel
        # NOTE(review): setting CUDA_VISIBLE_DEVICES here is likely too late —
        # CUDA was already initialized when the model moved to `device` above;
        # confirm this line has any effect.
        os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'
        model = torch.nn.DataParallel(model)

    # following timm: set wd as 0 for bias and norm layers
    param_groups = misc.get_parameter_groups(model_without_ddp, args.weight_decay)
    # NOTE(review): eval() of a CLI-provided expression — trusted input only.
    optimizer = eval(f"torch.optim.{args.optimizer}") # e.g. AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
    print(optimizer)
    loss_scaler = NativeScaler() # gradient scaler for AMP

    # Automatic restart: if a 'last' checkpoint exists in output_dir, resume from it.
    last_ckpt_fname = os.path.join(args.output_dir, f'checkpoint-last.pth')
    args.resume = last_ckpt_fname if os.path.isfile(last_ckpt_fname) else None

    # Otherwise optionally start from another model's weights (e.g. for finetuning).
    if not args.resume and args.start_from:
        print(f"Starting from an other model's weights: {args.start_from}")
        best_so_far = None
        args.start_epoch = 0
        ckpt = torch.load(args.start_from, 'cpu')
        msg = model_without_ddp.load_state_dict(ckpt['model'], strict=False)
        print(msg)
    else:
        best_so_far = misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)

    if best_so_far is None: best_so_far = np.inf

    # tensorboard writer (rank 0 only)
    log_writer = None
    if global_rank == 0 and args.output_dir is not None:
        log_writer = SummaryWriter(log_dir=args.output_dir, purge_step=args.start_epoch*1000)

    # Training dataset and loader.
    print('Building Train Data loader for dataset: ', args.dataset)
    train_dataset = (get_train_dataset_stereo if args.task=='stereo' else get_train_dataset_flow)(args.dataset, crop_size=args.crop) # build the training set
    def _print_repr_dataset(d): # recursively print every sub-dataset of a ConcatDataset
        if isinstance(d, torch.utils.data.dataset.ConcatDataset):
            for dd in d.datasets:
                _print_repr_dataset(dd)
        else:
            print(repr(d))
    _print_repr_dataset(train_dataset)
    print('total length:', len(train_dataset))
    if args.distributed:
        sampler_train = torch.utils.data.DistributedSampler(
            train_dataset, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
    else:
        sampler_train = torch.utils.data.RandomSampler(train_dataset)
    data_loader_train = torch.utils.data.DataLoader( # training data loader
        train_dataset, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True,
    )
    if args.val_dataset=='':
        data_loaders_val = None
    else:
        print('Building Val Data loader for datasets: ', args.val_dataset)
        val_datasets = (get_test_datasets_stereo if args.task=='stereo' else get_test_datasets_flow)(args.val_dataset) # build the validation sets
        for val_dataset in val_datasets: print(repr(val_dataset))
        data_loaders_val = [DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=False) for val_dataset in val_datasets] # validation data loaders
        # Metric key used to track the best checkpoint; only defined when
        # validation is enabled (its sole use below is guarded accordingly).
        bestmetric = ("AVG_" if len(data_loaders_val)>1 else str(data_loaders_val[0].dataset)+'_')+args.bestmetric

    print(f"Start training for {args.epochs} epochs")
    step_total = 0
    start_time = time.time()
    # Training loop.
    for epoch in range(args.start_epoch, args.epochs):

        if args.distributed: data_loader_train.sampler.set_epoch(epoch)

        # Train
        epoch_start = time.time()
        train_stats, step_total = train_one_epoch(model, criterion, metrics, data_loader_train, optimizer, device, epoch, step_total, loss_scaler, log_writer=log_writer, args=args) # one training epoch
        epoch_time = time.time() - epoch_start

        if args.distributed: dist.barrier()

        # Validation (current naive implementation runs the validation on every gpu ... not smart ...); every eval_every epochs
        if data_loaders_val is not None and args.eval_every > 0 and (epoch+1) % args.eval_every == 0:
            val_epoch_start = time.time()
            val_stats = validate_one_epoch(model, criterion, metrics, data_loaders_val, device, epoch, log_writer=log_writer, args=args) # one validation pass
            val_epoch_time = time.time() - val_epoch_start

            val_best = val_stats[bestmetric]

            # Save best-of-all checkpoint (lower metric value is better).
            if val_best <= best_so_far:
                best_so_far = val_best
                misc.save_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, best_so_far=best_so_far, fname='best', val_stats=val_stats, step_total=step_total)

            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 'epoch': epoch, **{f'val_{k}': v for k, v in val_stats.items()}}
        else:
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 'epoch': epoch,}

        if args.distributed: dist.barrier()

        # Save 'last' checkpoint every save_every epochs and at the final epoch.
        if args.output_dir and ((epoch+1) % args.save_every == 0 or epoch + 1 == args.epochs):
            misc.save_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, best_so_far=best_so_far, fname='last')

        # Append the epoch's stats to log.txt.
        if args.output_dir:
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str)) # total wall-clock training time

if __name__ == '__main__':
    # Parse the command line and launch training.
    parsed_args = get_args_parser().parse_args()
    main(parsed_args)
