import argparse
import cv2
import torch

from model import SCNN
from utils.prob2lines import getLane
from utils.transforms import *

# Build the lane-detection network once at import time; `main` loads the
# trained weights into this instance.  The input size here must match the
# Resize transform below — presumably (width, height); TODO confirm against
# the SCNN implementation.  The 800x288 CULane default is commented out in
# favor of a 512x288 variant.
# net = SCNN(input_size=(800, 288), pretrained=False)
net = SCNN(input_size=(512, 288), pretrained=False)
# Per-channel normalization statistics (CULane, per the inline comment).
mean=(0.3598, 0.3653, 0.3662) # CULane mean, std
std=(0.2573, 0.2663, 0.2756)
# Resize raw frames to the network input size.
# transform_img = Resize((800, 288))
transform_img = Resize((512, 288))
# Convert the resized image to a tensor and normalize it for the network.
transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))


def parse_args(argv=None):
    """Parse command-line arguments for the demo script.

    Args:
        argv: Optional list of argument strings. Defaults to
            ``sys.argv[1:]`` when None (standard argparse behavior);
            the parameter exists so the parser can be exercised in tests.

    Returns:
        argparse.Namespace with ``video_path``, ``weight_path`` and
        ``frames`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--video_path", '-i', type=str, help="Path to demo video")
    # BUG FIX: the original code unconditionally overwrote args.weight_path
    # AFTER parsing, so a -w/--weight_path given on the command line was
    # silently ignored.  Supplying the same path as the argparse default
    # keeps the old behavior when the flag is omitted and makes the flag
    # work again when it is given.
    parser.add_argument("--weight_path", '-w', type=str,
                        default='./experiments/exp4/exp4_best.pth',
                        help="Path to model weights")
    parser.add_argument("--frames", "-f", type=int, default=-1,
                        help="frames to generate. All if -1.")
    return parser.parse_args(argv)


def main(args):
    """Run SCNN lane detection over one video and write an annotated copy.

    Reads frames from ``args.video_path``, overlays the predicted lane
    masks, and writes the result next to the input file with the
    module-level ``output_suffix`` appended.  Relies on module globals
    set elsewhere in this file: ``net``, ``transform_img``,
    ``transform_to_net``, ``output_suffix`` and ``pbar``.

    Args:
        args: Namespace with ``video_path`` (input video), ``weight_path``
            (checkpoint path) and ``frames`` (max frames to process;
            process the whole video when negative).
    """
    file_name_video = args.video_path
    weight_path = args.weight_path

    # BUG FIX: load the checkpoint ONCE, before the frame loop.  The
    # original code re-ran torch.load + load_state_dict for every single
    # frame, which dominated runtime without changing the result.
    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()

    cap = cv2.VideoCapture(file_name_video)
    frames_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Output frames are the network input size (the transformed image is
    # what gets written), not the source resolution.
    size = (512, 288)
    print(size)
    out_video = cv2.VideoWriter(file_name_video[:-4] + output_suffix, fourcc, fps, size)

    if args.frames < 0:
        pbar.reset(total=frames_num)
    else:
        pbar.reset(total=args.frames)

    while cap.isOpened():
        success, im_origin = cap.read()
        # Stop on read failure/end-of-stream, or once the frame budget
        # (when non-negative) is exhausted.
        if not success or (args.frames >= 0
                           and cap.get(cv2.CAP_PROP_POS_FRAMES) >= args.frames):
            break

        img = cv2.cvtColor(im_origin, cv2.COLOR_BGR2RGB)
        img = transform_img({'img': img})['img']
        x = transform_to_net({'img': img})['img']
        x.unsqueeze_(0)

        # Inference only: disable autograd bookkeeping (the original built a
        # graph per frame and relied on .detach() afterwards).
        with torch.no_grad():
            seg_pred, exist_pred = net(x)[:2]
        seg_pred = seg_pred.cpu().numpy()[0]
        exist_pred = exist_pred.cpu().numpy()

        # Paint each detected lane (confidence > 0.5) in its own color and
        # alpha-blend the mask over the resized frame.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        lane_img = np.zeros_like(img)
        color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
        coord_mask = np.argmax(seg_pred, axis=0)
        for i in range(4):
            if exist_pred[0, i] > 0.5:
                lane_img[coord_mask == (i + 1)] = color[i]
        img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)

        out_video.write(img)
        pbar.update()

    pbar.refresh()
    out_video.release()
    cap.release()


if __name__ == "__main__":
    args = parse_args()
    videos_list = [
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_044216_rot180.mp4',
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_050945.mp4',
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_052349.mp4',
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_053738.mp4',
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_054658.mp4',
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_230723_rot180.mp4',
        r'D:\PythonWorkroom\datasets\Elevator\School_With_Tags\20210222_231810_rot180.mp4',
    ]
    output_suffix = '_open_check_20210524.mp4'
    from tqdm import tqdm
    pbar = tqdm()
    for vName in videos_list:
        args.video_path = vName
        args.frames = 3000
        main(args)
