# Import required packages
import cv2
import numpy as np
import torch
from torchvision import transforms
from PIL import Image, ImageOps

import scipy.misc as misc
import os
import glob

import torch
from torchvision import transforms
from PIL import Image, ImageOps

import numpy as np
import scipy.misc as misc
import os
import glob
import cv2
from utils.misc import thresh_OTSU, ReScaleSize, Crop
# from utils.model_eval import eval
from utils.evaluation_metrics import get_acc

# Dataset sub-directory name (kept for reference; not read by the code below).
DATABASE = './DRIVE/'
#
# Script configuration.
args = {
    # 'root'     : './dataset/' + DATABASE,
    # 'test_path': './dataset/' + DATABASE + 'test/',
    'test_path': '/home/jiayu/Desktop/Jimmy/enh_stillGan/',  # directory containing input images
    'pred_path': 'assets/' + 'JiaoMo/',  # NOTE(review): appears unused in this file — predict() writes to a hard-coded path

    'img_size': 384  # patch side length fed to the network
}


# if not os.path.exists(args['pred_path']):
#     os.makedirs(args['pred_path'])


def rescale(img):
    """Center-crop a PIL image to a square whose side is the shorter edge."""
    width, height = img.size
    side = min(width, height)
    left = (width - side) // 2
    top = (height - side) // 2
    return img.crop((left, top, left + side, top + side))


def ReScaleSize_DRIVE(image, re_size=512):
    """Center-crop `image` to a square, then resize it to (re_size, re_size)."""
    width, height = image.size
    side = min(width, height)
    left = (width - side) // 2
    top = (height - side) // 2
    cropped = image.crop((left, top, left + side, top + side))
    return cropped.resize((re_size, re_size))


def ReScaleSize_STARE(image, re_size=512):
    """Zero-pad `image` to a square (side = longer edge), then resize to (re_size, re_size)."""
    width, height = image.size
    side = max(width, height)
    pad_w = side - width
    pad_h = side - height
    # (left, top, right, bottom) padding; odd remainders go to the right/bottom.
    borders = (pad_w // 2, pad_h // 2, pad_w - (pad_w // 2), pad_h - (pad_h // 2))
    padded = ImageOps.expand(image, borders, fill=0)
    return padded.resize((re_size, re_size))


def load_JiaoMo():
    """Return the paths of every file found directly under args['test_path']."""
    # glob.glob already returns a list of matching paths.
    return glob.glob(os.path.join(args['test_path'], '*'))


def load_net():
    """Load the pretrained segmentation network from disk onto GPU 0.

    Returns the inner ``.module`` of the loaded object — presumably a
    ``DataParallel`` wrapper saved in the checkpoint (TODO: confirm).
    """
    checkpoint = torch.load(
        '/home/jiayu/Desktop/110_Our_FFN(0.8738,0.1813).pkl',
        map_location='cuda:0',
    )
    return checkpoint.module


def _tile_spans(length, tile):
    """Yield (start, stop) pairs of `tile`-wide windows covering [0, length).

    Windows are laid out back-to-back; the final window is clamped so it
    ends exactly at `length`, which means the trailing remainder strip is
    still covered (overlapping the previous window). Assumes length >= tile.
    """
    count = (length + tile - 1) // tile  # ceil division: include the remainder
    for k in range(count):
        start = k * tile
        if start + tile > length:
            start = length - tile  # clamp the last (partial) window
        yield start, start + tile


def predict():
    """Segment every image under args['test_path'] tile-by-tile and save results.

    Each grayscale image is split into img_size x img_size patches, each
    patch is pushed through the network, and the per-patch predictions are
    stitched back into a copy of the original image, which is written to
    the hard-coded output directory.

    Fixes vs. the original version:
    - the inner tile loop no longer shadows the outer image index `i`;
    - the unreachable combined boundary `elif` is gone — clamping is done
      once in _tile_spans;
    - floor division previously skipped the right/bottom remainder strips
      entirely (the clamping branches were dead code); ceil division with
      clamped spans now covers the whole image;
    - `.squeeze()` drops all singleton dims so the patch is 2-D whether the
      net returns [1, 1, H, W] (as the original comment stated) or [1, H, W].
    """
    net = load_net()
    images = load_JiaoMo()
    transform = transforms.Compose([
        transforms.ToTensor()
    ])

    tile = args['img_size']
    with torch.no_grad():
        net.eval()
        for img_path in images:
            print(img_path)
            # Basename without its 4-character extension (e.g. '.tif').
            index = img_path.split('/')[-1][:-4]
            img = cv2.imread(img_path, flags=0)  # flags=0 -> grayscale (2-D array)
            stitched = img.copy()
            rows, cols = img.shape  # numpy shape order is (rows, cols)
            for x1, x2 in _tile_spans(rows, tile):
                for y1, y2 in _tile_spans(cols, tile):
                    patch = transform(img[x1:x2, y1:y2])
                    # Add a batch dimension: the net expects a 4-D tensor.
                    patch = patch.unsqueeze(0).cuda()
                    output = net(patch)  # expected shape [1, 1, tile, tile]
                    # Drop all singleton dims so the result is 2-D for stitching.
                    output = output.data.cpu().numpy().squeeze()
                    stitched[x1:x2, y1:y2] = (output * 255).astype(np.uint8)

            cv2.imwrite("/home/jiayu/Desktop/Jimmy/pred_stillGan/" + index + "seg.png", stitched)


if __name__ == '__main__':
    # Run inference over the configured test directory when executed as a script.
    predict()
