import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms  # , utils
# import torch.optim as optim

import numpy as np
from PIL import Image
import glob
import cv2

from data_loader import RescaleT
from data_loader import ToTensor
from data_loader import ToTensorLab
from data_loader import SalObjDataset

from model import U2NET  # full size version 173.6 MB
from model import U2NETP  # small version u2net 4.7 MB

import sys
from alfred.utils.log import logger as logging
from alfred.dl.torch.common import device
import webcolors

# normalize the predicted SOD probability map

# Eval-time preprocessing shared with the dataset pipeline: resize to
# 320x320 then convert to a normalized tensor (flag=0 -- presumably the
# RGB-only normalization branch of ToTensorLab; confirm in data_loader).
my_transform = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])

# Background-replacement colors as BGR tuples (OpenCV channel order).
bk_color = webcolors.name_to_rgb('purple')
bk_color_bgr = (bk_color.blue, bk_color.green, bk_color.red)

# `bk_color` is reused; after this it holds the orange IntegerRGB.
bk_color = webcolors.name_to_rgb('orange')
bk_color_bgr2 = (bk_color.blue, bk_color.green, bk_color.red)


def normPRED(d):
    """Min-max normalize a predicted saliency map into [0, 1].

    Args:
        d: torch.Tensor of any shape (raw network output).

    Returns:
        torch.Tensor of the same shape, rescaled so min -> 0 and max -> 1.
        A constant (flat) input returns an all-zero tensor instead of the
        NaNs that 0/0 division would produce.
    """
    ma = torch.max(d)
    mi = torch.min(d)
    rng = ma - mi
    if rng == 0:
        # Degenerate map: every pixel equal; avoid division by zero.
        return torch.zeros_like(d)
    return (d - mi) / rng


def vis_output(ori_image, pred, d_dir, w_t=0, f_n=None):
    """Visualize (and optionally save) one saliency prediction.

    Displays the 8-bit mask, a contour overlay, the masked foreground, and
    two background-replaced composites in OpenCV windows; when `f_n` is
    given, also writes each of them under ./test_data/.

    Args:
        ori_image: original BGR image (numpy array) the prediction maps to.
        pred: saliency tensor with values in [0, 1] (any device).
        d_dir: NOTE(review): unused -- outputs go to a hard-coded
            './test_data/' prefix instead; confirm intent before removing.
        w_t: cv2.waitKey delay in ms; 0 blocks until a key press (image
            mode), 1 keeps the video loop running.
        f_n: optional output file name suffix; enables the imwrite calls.
    """
    predict = pred
    predict = predict.squeeze()
    predict_np = predict.cpu().data.numpy()
    # Scale [0, 1] probabilities to an 8-bit grayscale mask.
    out_img = np.array(predict_np*255, dtype=np.uint8)
    image = ori_image.copy()

    # Resize mask back to the original image resolution (cv2 takes (w, h)).
    out_img = cv2.resize(out_img, (image.shape[1], image.shape[0]))
    # found contours of out img
    # High threshold (230) keeps only confidently salient pixels.
    th, dst = cv2.threshold(out_img, 230, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
    # Foreground only: zero out everything outside the binary mask.
    res = cv2.bitwise_and(ori_image, ori_image, mask=dst)

    # Composite 1: foreground pasted over a solid purple background.
    bk_img = np.zeros(ori_image.shape, np.uint8)
    bk_img[:] = bk_color_bgr
    bk_img = cv2.bitwise_and(bk_img, bk_img, mask=255-dst)
    cc = res + bk_img

    # Composite 2: same foreground over a solid orange background.
    bk_img[:] = bk_color_bgr2
    bk_img = cv2.bitwise_and(bk_img, bk_img, mask=255-dst)
    cc2 = res + bk_img

    cv2.imshow('rr', out_img)
    cv2.imshow('imgg', image)
    cv2.imshow('res', res)
    cv2.imshow('cc', cc)
    cv2.imshow('cc2', cc2)
    if f_n:
        print('test_data/crop_{}'.format(f_n))
        cv2.imwrite( './test_data/crop_{}'.format(f_n), res)
        cv2.imwrite('./test_data/mask_{}'.format(f_n), out_img)
        cv2.imwrite('./test_data/contour_{}'.format(f_n), image)
        cv2.imwrite('./test_data/cc_{}'.format(f_n), cc)
        cv2.imwrite('./test_data/cc2_{}'.format(f_n), cc2)
    cv2.waitKey(w_t)


def preprocess_img(ori_img):
    """Apply the shared eval transform to one raw BGR frame.

    Wraps the frame in the sample-dict layout the dataset transforms
    expect (index / image / dummy zero label), runs `my_transform`,
    and returns only the transformed image tensor.
    """
    sample = {
        'imidx': np.array([0]),
        'image': ori_img,
        'label': np.zeros(ori_img.shape),
    }
    return my_transform(sample)['image']


def _predict(net, ori_img):
    """Forward one BGR frame through `net`; return the normalized saliency map."""
    inputs_test = preprocess_img(ori_img)
    inputs_test = inputs_test.type(torch.FloatTensor).to(device).unsqueeze(0)
    # Inference only: no_grad avoids building the autograd graph
    # (the original leaked activation memory until the explicit `del`).
    with torch.no_grad():
        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)
        # d1 is the fused full-resolution side output; channel 0 is the map.
        pred = normPRED(d1[:, 0, :, :])
    del d1, d2, d3, d4, d5, d6, d7
    return pred


def main():
    """Run U^2-Net salient-object detection on a directory of .jpg images
    or on an .mp4 video file.

    Usage: python <script> <image_dir | video.mp4>
    """
    model_name = 'u2net'  # or 'u2netp' for the small model

    data_f = sys.argv[1]
    prediction_dir = './test_data/' + model_name + '_results/'
    model_dir = './saved_models/' + model_name + '/' + model_name + '.pth'

    if model_name == 'u2net':
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif model_name == 'u2netp':
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    else:
        # Previously fell through with `net` undefined -> NameError below.
        raise ValueError('unknown model name: {}'.format(model_name))
    # map_location lets CUDA-saved checkpoints load on CPU-only machines.
    net.load_state_dict(torch.load(model_dir, map_location=device))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    if os.path.isdir(data_f):
        img_name_list = glob.glob(os.path.join(data_f, '*.jpg'))
        logging.info('Found all {} images.'.format(len(img_name_list)))
        for img_p in img_name_list:
            ori_img = cv2.imread(img_p)
            if ori_img is None:
                # cv2.imread returns None for unreadable files.
                logging.info('Skipping unreadable image: {}'.format(img_p))
                continue
            pred = _predict(net, ori_img)
            vis_output(ori_img, pred, prediction_dir,
                       f_n=os.path.basename(img_p))
    elif 'mp4' in data_f:
        # on video
        cap = cv2.VideoCapture(data_f)
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    # End of stream / decode failure: `frame` is None here;
                    # the original crashed inside preprocess_img.
                    break
                pred = _predict(net, frame)
                vis_output(frame, pred, prediction_dir, w_t=1)
        finally:
            cap.release()


if __name__ == "__main__":
    main()
