'''
Fine-tuning and validation script for PSMNet.
'''
from __future__ import print_function
import argparse
import os
from ptflops import get_model_complexity_info
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import copy
from tqdm import tqdm
from dataloader import KITTI2015Loader as ls
from dataloader import DataLoader as DA
from utils.config import *
from utils import config
from torch.utils.tensorboard import SummaryWriter

from models import *

# Expose both GPUs to this process; nn.DataParallel below splits batches across them.
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'

# Command-line configuration for fine-tuning.
parser = argparse.ArgumentParser(description='PSMNet')
parser.add_argument('--maxdisp', type=int ,default=192, help='maxium disparity')
parser.add_argument('--model', default='stackhourglass', help='select model')
parser.add_argument('--datatype', default='kitti2015', help='datapath')
parser.add_argument('--kfold', type=int, default=0, help='Kfold num', choices=range(6)) # 5-fold cross-validation; 0 disables cross-validation
parser.add_argument('--batch_size', type=int, default=1, help='batch_size')
parser.add_argument('--num_workers', type=int, default=0, help='num_workers')
parser.add_argument('--epochs', type=int, default=300, help='number of epochs to train')
parser.add_argument('--loadmodel', default='./pretrained_models/pretrained_sceneflow_new.tar', help='load model')
parser.add_argument('--savemodel', default='./checkpoints/', help='save model')
parser.add_argument('--logdir', default='./logs/', help='the directory to save logs')
# NOTE(review): the help text is inverted — passing --no-cuda DISABLES CUDA training.
parser.add_argument('--no-cuda', action='store_true', default=False, help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed CPU and (when available) GPU RNGs for reproducibility.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Select the dataset-specific file-listing module (`ls`) and the data root for
# the chosen dataset. `system` comes from utils.config (star-imported above)
# and switches between the Windows and Ubuntu machine paths.
# NOTE(review): if --datatype matches none of these branches, `datapath` is never
# bound and ls.dataloader() below raises NameError (and `ls` stays the KITTI2015
# loader imported at the top of the file).
if args.datatype == 'kitti2015':
    from dataloader import KITTI2015Loader as ls
    if system == 'Windows': datapath = 'G:/KITTI/KITTI2015/data_scene_flow/training/'
    elif system == 'Ubuntu': datapath = '/media/ubuntu/e/zhouyiqing/KITTI/KITTI2015/data_scene_flow/training/'
elif args.datatype == 'kitti2012':
    from dataloader import KITTI2012Loader as ls
    if system == 'Windows': datapath = 'G:/KITTI/KITTI2012/data_stereo_flow/training/'
    elif system == 'Ubuntu': datapath = '/media/ubuntu/e/zhouyiqing/KITTI/KITTI2012/data_stereo_flow/training/'
elif args.datatype == 'usvinland':
    if args.kfold != 0: config.kfold = args.kfold # select which cross-validation fold to hold out
    from dataloader import USVInlandLoader as ls
    if system == 'Windows': datapath = 'G:/USVInland/Stereo Matching/Low_Res_640_320/'
    elif system == 'Ubuntu': datapath = '/media/ubuntu/e/zhouyiqing/USVInland/Stereo Matching/Low_Res_640_320/'
elif args.datatype == 'usvinland_seg':
    if args.kfold != 0: config.kfold = args.kfold # select which cross-validation fold to hold out
    from dataloader import USVInlandLoaderSeg as ls
    if system == 'Windows': datapath = 'G:/USVInland/Stereo Matching/Segmentation/'
    elif system == 'Ubuntu': datapath = '/media/ubuntu/e/zhouyiqing/USVInland/Stereo Matching/Segmentation/'

# Split file lists into train/val and wrap them in shuffled / sequential loaders.
all_left_img, all_right_img, all_left_disp, val_left_img, val_right_img, val_left_disp = ls.dataloader(datapath)

TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(all_left_img, all_right_img, all_left_disp, True, datatype = args.datatype), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=False) # training-set loader
ValImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(val_left_img, val_right_img, val_left_disp, False, datatype = args.datatype), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, drop_last=False) # validation-set loader

# Build the selected network variant.
if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp)
elif args.model == 'basic':
    model = basic(args.maxdisp)
else:
    # Fail fast on an unknown model name instead of printing and then crashing
    # later with a confusing NameError on `model`.
    raise ValueError('unknown model: {}'.format(args.model))

if args.cuda:
    model = nn.DataParallel(model) # split each batch across the visible GPUs
    model.cuda()

# Optionally warm-start from a pretrained checkpoint (Scene Flow by default).
if args.loadmodel is not None:
    state_dict = torch.load(args.loadmodel)
    model.load_state_dict(state_dict['state_dict'])

print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
if args.datatype == 'usvinland':
    # Report MACs / parameter count with ptflops for the USVInland low-res
    # input size (640x320).
    # NOTE(review): the input constructor hardcodes 'cuda', so this branch
    # fails on CPU-only runs — confirm whether that is intended.
    prepare_input = lambda _: {"left": torch.FloatTensor(1, 3, 320, 640).to('cuda'), "right": torch.FloatTensor(1, 3, 320, 640).to('cuda')}
    macs, params = get_model_complexity_info(model.module, input_res=(3, 320, 640), input_constructor=prepare_input, print_per_layer_stat=False, verbose=False)
    print(f'ptflops: {{ macs: {macs}, params: {params} }}')

# The initial lr here is a placeholder: adjust_learning_rate() overwrites it at
# the start of every epoch before any optimizer step is taken.
optimizer = optim.Adam(model.parameters(), lr=0.1, betas=(0.9, 0.999))

def train(imgL, imgR, disp_L):
    """Run one optimization step on a single training batch.

    Args:
        imgL, imgR: left/right rectified image batches (array-like; converted
            to FloatTensor here).
        disp_L: ground-truth disparity map for the left image.

    Returns:
        float: the (weighted) smooth-L1 loss for this batch.
    """
    model.train()
    imgL = Variable(torch.FloatTensor(imgL))
    imgR = Variable(torch.FloatTensor(imgR))
    # Bind disp_true unconditionally — the original only assigned it inside the
    # CUDA branch, so CPU runs crashed with NameError at the mask below.
    disp_true = Variable(torch.FloatTensor(disp_L))

    if args.cuda:
        imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()

    # Supervise only pixels with a valid (positive) ground-truth disparity.
    mask = (disp_true > 0)
    mask.detach_()

    optimizer.zero_grad()

    if args.model == 'stackhourglass':
        # The stacked-hourglass model returns three predictions; their
        # smooth-L1 losses are weighted 0.5 / 0.7 / 1.0 and summed.
        output1, output2, output3 = model(imgL, imgR)
        output1 = torch.squeeze(output1, 1)
        output2 = torch.squeeze(output2, 1)
        output3 = torch.squeeze(output3, 1)
        loss = 0.5*F.smooth_l1_loss(output1[mask], disp_true[mask], reduction='mean') + 0.7*F.smooth_l1_loss(output2[mask], disp_true[mask], reduction='mean') + F.smooth_l1_loss(output3[mask], disp_true[mask], reduction='mean')
    elif args.model == 'basic':
        output = model(imgL, imgR)
        # Fixed: the original squeezed and masked `output3`, which is undefined
        # in this branch (NameError); the basic model's single output is used.
        output = torch.squeeze(output, 1)
        loss = F.smooth_l1_loss(output[mask], disp_true[mask], reduction='mean')

    loss.backward()
    optimizer.step()

    return loss.item()

def validate(imgL, imgR, disp_true):
        """Evaluate one validation batch and return disparity error statistics.

        Args:
            imgL, imgR: left/right image batches (array-like; converted to
                FloatTensor here).
            disp_true: CPU ground-truth disparity tensor, indexed below with
                three index arrays (presumably (batch, H, W) — TODO confirm).
                NOTE: this tensor is overwritten in place with per-pixel errors.

        Returns:
            dict: mean absolute error plus the fraction of counted pixels whose
            absolute error exceeds 1..5 px.
        """
        model.eval()
        imgL = Variable(torch.FloatTensor(imgL))
        imgR = Variable(torch.FloatTensor(imgR))
        if args.cuda:
            imgL, imgR = imgL.cuda(), imgR.cuda()

        # Inference only — no gradients needed.
        with torch.no_grad():
            output3 = model(imgL, imgR)

        pred_disp = output3.squeeze(1).data.cpu()

        # computing n-px errors over valid (positive-disparity) pixels
        true_disp = copy.deepcopy(disp_true)
        mask = np.argwhere(true_disp > 0) # coordinates of valid GT pixels, shape (N, 3)
        # NOTE(review): np.argwhere returns an (N, 3) array, so mask[0], mask[1],
        # mask[2] are the coordinate TRIPLES of the first three valid pixels,
        # not per-axis index vectors — mask[:, 0]/mask[:, 1]/mask[:, 2] may be
        # what was intended. Kept as-is (it matches upstream PSMNet); confirm.
        disp_true[mask[0][:], mask[1][:], mask[2][:]] = np.abs(true_disp[mask[0][:], mask[1][:], mask[2][:]]-pred_disp[mask[0][:], mask[1][:], mask[2][:]]) # absolute error at each addressed position
        # correct = (disp_true[mask[0][:], mask[1][:], mask[2][:]] < 3) | (disp_true[mask[0][:], mask[1][:], mask[2][:]] < true_disp[mask[0][:], mask[1][:], mask[2][:]] * 0.05) # error < 3px or < 5% of GT counts as correct
        # NOTE(review): this sums the WHOLE tensor (unmodified entries included),
        # so it is only the mean error if non-addressed entries are zero — verify.
        avg_error = (float(torch.sum(disp_true)) / float(len(mask[0]))) # mean error
        error_1px = (float(torch.sum((disp_true[mask[0][:], mask[1][:], mask[2][:]] > 1))) / float(len(mask[0]))) # 1px error rate
        error_2px = (float(torch.sum((disp_true[mask[0][:], mask[1][:], mask[2][:]] > 2))) / float(len(mask[0]))) # 2px error rate
        error_3px = (float(torch.sum((disp_true[mask[0][:], mask[1][:], mask[2][:]] > 3))) / float(len(mask[0]))) # 3px error rate
        error_4px = (float(torch.sum((disp_true[mask[0][:], mask[1][:], mask[2][:]] > 4))) / float(len(mask[0]))) # 4px error rate
        error_5px = (float(torch.sum((disp_true[mask[0][:], mask[1][:], mask[2][:]] > 5))) / float(len(mask[0]))) # 5px error rate
        torch.cuda.empty_cache()

        return {'avg_error': avg_error,
                'error_1px': error_1px,
                'error_2px': error_2px,
                'error_3px': error_3px,
                'error_4px': error_4px,
                'error_5px': error_5px}

def adjust_learning_rate(optimizer, epoch):
    """Apply the fine-tuning lr schedule: 1e-3 up to epoch 200, 1e-4 after."""
    lr = 0.001 if epoch <= 200 else 0.0001
    print("learning_rate:", lr)
    # Write the scheduled rate into every parameter group.
    for group in optimizer.param_groups:
        group['lr'] = lr

def main():
    """Fine-tune on the training split, validate every epoch, and checkpoint.

    Streams the per-step training loss and the per-epoch validation errors to
    tensorboard, and saves a checkpoint whenever the epoch's mean validation
    error beats the best seen so far.
    """
    best_error = np.inf   # lowest mean validation error observed so far
    global_step = 0       # optimizer steps accumulated across all epochs

    # tensorboard writer for loss/error curves
    writer = SummaryWriter(log_dir=args.logdir)

    # keys of the dict returned by validate()
    metric_keys = ('avg_error', 'error_1px', 'error_2px', 'error_3px', 'error_4px', 'error_5px')

    for epoch in range(1, args.epochs + 1):
        adjust_learning_rate(optimizer, epoch)  # schedule lr by epoch count

        # ---- training pass ----
        epoch_loss = 0
        for imgL_crop, imgR_crop, disp_crop_L in tqdm(TrainImgLoader):
            step_loss = train(imgL_crop, imgR_crop, disp_crop_L)
            global_step += 1
            # live per-step loss curve
            writer.add_scalar("live_loss", step_loss, global_step)
            epoch_loss += step_loss

        train_loss = epoch_loss / len(TrainImgLoader)
        print('epoch %d total training loss = %.3f' %(epoch, train_loss))

        # ---- validation pass ----
        totals = {key: 0 for key in metric_keys}
        for batch_idx, (imgL, imgR, disp_L) in enumerate(tqdm(ValImgLoader)):
            vali_error = validate(imgL, imgR, disp_L)

            print('Iter %d in val avg_error = %.3f | error_1px = %.3f | error_2px = %.3f | error_3px = %.3f | error_4px = %.3f | error_5px = %.3f' %(batch_idx, vali_error['avg_error'], vali_error['error_1px']*100, vali_error['error_2px']*100, vali_error['error_3px']*100, vali_error['error_4px']*100, vali_error['error_5px']*100))
            for key in metric_keys:
                totals[key] += vali_error[key]

        # epoch means; px-error rates are reported as percentages
        n_batches = len(ValImgLoader)
        avg_error_avg = totals['avg_error'] / n_batches
        avg_error_1px = totals['error_1px'] / n_batches * 100
        avg_error_2px = totals['error_2px'] / n_batches * 100
        avg_error_3px = totals['error_3px'] / n_batches * 100
        avg_error_4px = totals['error_4px'] / n_batches * 100
        avg_error_5px = totals['error_5px'] / n_batches * 100

        # per-epoch validation error curves
        writer.add_scalar("avg_error_avg", avg_error_avg, epoch)
        writer.add_scalar("avg_error_1px", avg_error_1px, epoch)
        writer.add_scalar("avg_error_2px", avg_error_2px, epoch)
        writer.add_scalar("avg_error_3px", avg_error_3px, epoch)
        writer.add_scalar("avg_error_4px", avg_error_4px, epoch)
        writer.add_scalar("avg_error_5px", avg_error_5px, epoch)
        print('epoch %d in val total avg_error = %.3f | error_1px = %.3f | error_2px = %.3f | error_3px = %.3f | error_4px = %.3f | error_5px = %.3f' %(epoch, avg_error_avg, avg_error_1px, avg_error_2px, avg_error_3px, avg_error_4px, avg_error_5px))

        # ---- checkpoint the best (lowest mean error) epoch ----
        if avg_error_avg < best_error:
            best_error = avg_error_avg

            savefilename = args.savemodel + 'finetune_' + str(epoch) + '.tar'
            torch.save({
                'state_dict': model.state_dict(),
                'train_loss': train_loss,
                'step': global_step,
                'epoch': epoch,
                'vali_error_avg': avg_error_avg,
                'vali_error_1px': avg_error_1px,
                'vali_error_2px': avg_error_2px,
                'vali_error_3px': avg_error_3px,
                'vali_error_4px': avg_error_4px,
                'vali_error_5px': avg_error_5px,
            }, savefilename)

    writer.close()

# Run the fine-tuning loop only when executed as a script (not on import).
if __name__ == '__main__':
    main()
