import os
import time
import shutil
import torch
import torchvision
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import cv2
import utils.transforms as tf
import numpy as np
import models
# from models import sync_bn
import dataset as ds
from options.options import parser
import torch.nn.functional as F
from prob_to_lines import *
import time
# Best mean IoU seen so far; overwritten from the loaded checkpoint in model_tst().
best_mIoU = 0


def model_tst():
    """Load an ERFNet lane-detection checkpoint and run it on one image.

    Reads 'frame1.jpg', runs the network on CPU, writes the blurred
    combined lane-probability map to 'predict.jpg', and paints the red
    channel of a 976x208 preview wherever a lane was predicted.

    Returns:
        (model, transform): the eval-mode ``DataParallel`` model and the
        preprocessing transform, so video_detect() can reuse them.

    Raises:
        ValueError: if ``args.dataset`` is not one of the known datasets.
    """
    global args, best_mIoU
    args = parser.parse_args()

    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(gpu) for gpu in args.gpus)

    args.gpus = len(args.gpus)

    # Per-dataset class counts; only num_class is used below.
    if args.dataset == 'VOCAug' or args.dataset == 'VOC2012' or args.dataset == 'COCO':
        num_class = 21
        ignore_label = 255
        scale_series = [10, 20, 30, 60]
    elif args.dataset == 'Cityscapes':
        num_class = 19
        ignore_label = 255
        scale_series = [15, 30, 45, 90]
    elif args.dataset == 'ApolloScape':
        num_class = 37
        ignore_label = 255
    elif args.dataset == 'CULane':
        num_class = 5  # 4 lane classes + background
        ignore_label = 255
    else:
        raise ValueError('Unknown dataset ' + args.dataset)

    model = models.ERFNet(num_class)
    input_mean = model.input_mean
    input_std = model.input_std
    model = torch.nn.DataParallel(model)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # map_location forces CPU so the checkpoint loads without a GPU.
            checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
            args.start_epoch = checkpoint['epoch']
            best_mIoU = checkpoint['best_mIoU']
            model.load_state_dict(checkpoint['state_dict'])
            # BUG FIX: the original printed args.evaluate here, not the
            # checkpoint path that was actually loaded.
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    model.eval()

    transform = torchvision.transforms.Compose([
        tf.GroupRandomScaleNew(size=(args.img_width, args.img_height),
                               interpolation=(cv2.INTER_LINEAR, cv2.INTER_NEAREST)),
        tf.GroupNormalize(mean=(input_mean, (0, )), std=(input_std, (1, ))),
    ])

    # NOTE(review): CULane frames are 1640x590 and cv2.resize takes
    # (width, height) — the (590, 1640) below looks transposed. Kept
    # exactly as in the original; confirm against the training pipeline.
    img = cv2.imread('frame1.jpg')
    img_show = cv2.resize(img, (976, 208))
    img = cv2.resize(img, (590, 1640))
    # The transform expects an (image, label) pair; the label is unused here.
    img, _ = transform((img, img))
    img = cv2.resize(img, (976, 208))

    # HWC uint8 -> NCHW float tensor with batch dimension 1.
    inputs = torch.from_numpy(img).permute(2, 0, 1).contiguous().float()
    inputs = inputs.unsqueeze(0)

    start = time.time()
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        outputs, output_exist = model(inputs)
    print(time.time() - start)
    print(outputs.shape, output_exist.shape)

    outputs = F.softmax(outputs, dim=1).data.numpy()

    # Combine the four lane channels (channel 0 is background). Each channel
    # is scaled to 0-255 and truncated to int BEFORE summing, matching the
    # original accumulation loop exactly.
    prob_map = sum((outputs[0][c] * 255).astype(int) for c in range(1, 5))

    save_img = cv2.blur(prob_map, (9, 9))
    cv2.imwrite('predict.jpg', save_img)

    # Overlay: saturate the red channel (BGR index 2) wherever any lane
    # probability survived the blur.
    img_show[:, :, 2][save_img > 0] = 255

    return model, transform


def video_detect():
    """Run the lane detector frame-by-frame over 'output.mp4'.

    Shows each annotated 976x208 frame in a window and writes the
    annotated stream to 'result.mp4' at 10 fps.
    """
    model, transform = model_tst()
    cap = cv2.VideoCapture('output.mp4')

    fps = 10
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    out_cap = cv2.VideoWriter()
    out_cap.open('result.mp4', fourcc, fps, (976, 208))

    while cap.isOpened():
        ret, img = cap.read()
        # BUG FIX: the original never checked ret, so at end-of-stream
        # cv2.resize was called on the returned None frame and crashed.
        if not ret:
            break

        img_show = cv2.resize(img, (976, 208))
        # NOTE(review): (590, 1640) is (width, height) to cv2.resize and
        # looks transposed for 1640x590 frames — kept as in the original.
        img = cv2.resize(img, (590, 1640))
        img, _ = transform((img, img))
        img = cv2.resize(img, (976, 208))

        # HWC -> NCHW float tensor, batch of 1.
        inputs = torch.from_numpy(img).permute(2, 0, 1).contiguous().float()
        inputs = inputs.unsqueeze(0)

        with torch.no_grad():  # inference only: skip autograd bookkeeping
            outputs, output_exist = model(inputs)
        outputs = F.softmax(outputs, dim=1).data.numpy()

        # Combine the four lane channels (channel 0 is background), each
        # scaled to 0-255 and int-truncated before summing, as the
        # original accumulation loop did.
        prob_map = sum((outputs[0][c] * 255).astype(int) for c in range(1, 5))
        save_img = cv2.blur(prob_map, (9, 9))

        # Saturate the red channel (BGR index 2) where the blurred lane
        # probability exceeds the display threshold.
        img_show[:, :, 2][save_img > 150] = 255

        cv2.imshow('img', img_show)
        cv2.waitKey(2)
        out_cap.write(img_show)

    # BUG FIX: VideoCapture/VideoWriter have no .close(); the original
    # raised AttributeError here. release() is the correct OpenCV API.
    cap.release()
    out_cap.release()
    
    


if __name__ == '__main__':
    # Default entry point: single-image smoke test.
    # Swap the comment below to process a video stream instead.
    model_tst()
    # video_detect()
