from __future__ import print_function, division
import argparse
import os
import cv2
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import numpy as np
import time
from datasets import __datasets__
# from models import __models__
from utils import *
from torch.utils.data import DataLoader
from utils.config import *
# cudnn.benchmark = True

# Pin inference to the first GPU; must be set before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Command-line interface. NOTE: `sys_root` is expected to come from the star
# import of utils.config above — TODO confirm it is defined there.
parser = argparse.ArgumentParser(description='Accurate and Real-Time Stereo Matching via Context and Geometry Interaction (CGI-Stereo)')
parser.add_argument('--model', default='CGI_Stereo', help='select a model structure', choices=['CGI_Stereo', 'ACVNet'])
parser.add_argument('--maxdisp', type=int, default=192, help='maximum disparity')
parser.add_argument('--dataset', default='kitti', help='dataset name', choices=__datasets__.keys())
parser.add_argument('--datapath_12', default=sys_root+"/KITTI/KITTI2012/data_stereo_flow/", help='data path')
parser.add_argument('--datapath_15', default=sys_root+"/KITTI/KITTI2015/data_scene_flow/", help='data path')
parser.add_argument('--datapath_usv', default=sys_root+"/USVInland/Stereo Matching/Low_Res_640_320/", help='data path')
parser.add_argument('--datapath_usv_seg', default=sys_root+"/USVInland/Stereo Matching/Segmentation/", help='data path')
parser.add_argument('--datapath_spring', default=sys_root+"/Spring/", help='data path')
parser.add_argument('--num_workers', type=int, default=0, help='num workers')
parser.add_argument('--testlist',default='./filenames/kitti15_test.txt', help='testing list')
parser.add_argument('--kfold', type=int, default=0, help='Kfold num', choices=range(6)) # 5-fold cross-validation; 0 disables cross-validation
parser.add_argument('--loadckpt', default='./pretrained_models/CGI_Stereo/kitti.ckpt',help='load the weights from a specific checkpoint')
# parse arguments
args = parser.parse_args()

# models
# Select the model registry at import time: the two architectures live in
# separate packages, but each exposes a __models__ dict keyed by model name.
if args.model == 'CGI_Stereo':
    from models import __models__
elif args.model == 'ACVNet':
    from models_acv import __models__

# dataset, dataloader
StereoDataset = __datasets__[args.dataset]
# `rate` converts a float disparity map into the integer encoding written to
# disk in test(): pixels are stored as round(disp / rate). For KITTI that is
# disp * 256 in uint16; the USVInland factor is presumably the project's own
# 8-bit encoding convention — TODO confirm against the dataset loader.
if args.dataset == 'kitti':
    rate = 1.0 / 256
    test_dataset = StereoDataset(args.datapath_12, args.datapath_15, args.testlist, False)
elif args.dataset == 'usvinland':
    rate = 1.0 / 255 * 50
    if args.kfold != 0:
        print('Kfold:', args.kfold)
        # Cross-validation list files carry the fold index, e.g. usvinland_val_1.txt
        args.testlist = args.testlist.replace('.txt', '_' + str(args.kfold) + '.txt')
    test_dataset = StereoDataset(args.datapath_usv, args.testlist, False)
elif args.dataset == 'usvinland_seg':
    rate = 1.0 / 255 * 50
    test_dataset = StereoDataset(args.datapath_usv_seg, args.testlist, False)
elif args.dataset == 'spring':
    rate = 1.0
    test_dataset = StereoDataset(args.datapath_spring, args.testlist, False)
else:
    # Fail fast: without this branch an unhandled dataset name would surface
    # later as a confusing NameError on `test_dataset`.
    raise ValueError('unsupported dataset: {}'.format(args.dataset))
TestImgLoader = DataLoader(test_dataset, 1, shuffle=False, num_workers=args.num_workers, drop_last=False)
print('dataset', args.dataset)
print('testlist:', args.testlist)

# model, optimizer
# Instantiate the selected architecture and wrap in DataParallel so checkpoint
# keys (saved with the 'module.' prefix) match on load.
model = __models__[args.model](args.maxdisp)
model = nn.DataParallel(model)
model.cuda()

###load parameters
print("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict['model'])

# Output root is per-dataset; cross-validation runs get a per-fold subfolder.
save_dir = './out/' + args.dataset
if args.dataset == 'usvinland' and args.kfold != 0: save_dir = os.path.join(save_dir, 'Kfold', 'Kfold_' + str(args.kfold)) # cross-validation output

def test():
    """Run inference over the whole test split and write results to disk.

    For each sample: predict disparity, crop off the padding added by the
    dataset loader, then save (a) the disparity encoded as an integer image
    (scaled by 1/rate) under `save_dir` and (b) a JET pseudo-color
    visualization under `save_dir/pseudo`.
    """
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(os.path.join(save_dir, 'pseudo'), exist_ok=True)

    for batch_idx, sample in enumerate(TestImgLoader):
        # Synchronize around the forward pass so the reported time covers the
        # actual GPU work rather than just the asynchronous kernel launches.
        torch.cuda.synchronize()
        start_time = time.time()
        disp_est_np = tensor2numpy(test_sample(sample))
        torch.cuda.synchronize()
        # BUG FIX: '{:3f}' set a field width of 3; '{:.3f}' prints 3 decimals.
        print('Iter {}/{}, time = {:.3f}'.format(batch_idx, len(TestImgLoader), time.time() - start_time))
        top_pad_np = tensor2numpy(sample["top_pad"])
        right_pad_np = tensor2numpy(sample["right_pad"])
        left_filenames = sample["left_filename"]

        for disp_est, top_pad, right_pad, fn in zip(disp_est_np, top_pad_np, right_pad_np, left_filenames):
            assert len(disp_est.shape) == 2
            # Remove the loader's top/right padding (a zero right pad would
            # make the slice [:-0] empty, hence the branch).
            if right_pad == 0:
                disp_est = np.array(disp_est[top_pad:, :], dtype=np.float32)
            else:
                disp_est = np.array(disp_est[top_pad:, :-right_pad], dtype=np.float32)

            filename = fn.split('/')[-1].replace('jpg', 'png')
            fn = os.path.join(save_dir, filename)
            if 'usvinland' in args.dataset:
                # USVInland disparities fit in 8 bits at this encoding.
                disp_est_uint = np.round(disp_est / rate).astype(np.uint8)
                # Map to pseudo-color: JET renders near (large disparity) red, far blue.
                disp_pseudo = cv2.applyColorMap(cv2.convertScaleAbs(disp_est_uint, alpha=5), cv2.COLORMAP_JET)
            else:
                # KITTI/Spring use a 16-bit encoding; colorize from the
                # re-scaled (metric disparity) values so alpha=5 stays sensible.
                disp_est_uint = np.round(disp_est / rate).astype(np.uint16)
                disp_pseudo = cv2.applyColorMap(cv2.convertScaleAbs(disp_est_uint * rate, alpha=5), cv2.COLORMAP_JET)
            cv2.imwrite(fn, disp_est_uint)
            cv2.imwrite(os.path.join(save_dir, 'pseudo', filename), disp_pseudo)

# test one sample
@make_nograd_func
def test_sample(sample):
    """Predict disparity for one batch and return the final estimate.

    Both supported models (CGI_Stereo and ACVNet) are called with the same
    (left, right) signature, so the original duplicated if/elif branches were
    byte-identical and have been collapsed into a single forward pass.
    `sample` is a batch dict from the dataloader with 'left'/'right' tensors;
    the model returns a sequence of disparity maps, last one being the final.
    """
    model.eval()
    disp_ests = model(sample['left'].cuda(), sample['right'].cuda())
    return disp_ests[-1]

# Script entry point: evaluate the loaded checkpoint over the test set.
if __name__ == '__main__':
    test()
