import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms  

import numpy as np
from PIL import Image

from data_loader_SPARCS import ToTensorNorm, CloudDataset

from model import BoundaryNets

import pandas as pd

# use cudnn
# Let cuDNN benchmark convolution algorithms per input size; fastest when
# all inference patches share one fixed size.
torch.backends.cudnn.benchmark = True
# NOTE(review): CUDA_VISIBLE_DEVICES is set *after* importing torch; this
# only takes effect because CUDA initializes lazily — confirm nothing above
# touches CUDA before this line.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"


def ensure_dir(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    ``exist_ok=True`` removes the check-then-create race of the previous
    ``isdir`` test: a concurrent creator no longer triggers FileExistsError.
    """
    os.makedirs(dir_path, exist_ok=True)

def normPRED(d):
    """Min-max normalize a tensor into the [0, 1] range.

    Args:
        d (torch.Tensor): prediction tensor of any shape.

    Returns:
        torch.Tensor: same shape as *d*, rescaled so min -> 0 and max -> 1.
        A constant tensor (max == min) returns all zeros instead of the
        NaNs the original 0/0 division produced.
    """
    ma = torch.max(d)
    mi = torch.min(d)
    if ma == mi:
        # Degenerate case: avoid division by zero for constant inputs.
        return torch.zeros_like(d)
    return (d - mi) / (ma - mi)

def save_output(image_name, pred, d_dir):
    """Save a single-channel prediction tensor as an 8-bit RGB PNG.

    Args:
        image_name (str): file stem; ``.png`` is appended.
        pred (torch.Tensor): prediction assumed to be in [0, 1] (see
            ``normPRED``); singleton dimensions are squeezed away.
        d_dir (str): output prefix; must end with a path separator since
            it is concatenated directly to the file name.
    """
    # .detach() replaces the deprecated .data attribute; scale [0, 1]
    # floats to 0-255 grayscale before handing off to PIL.
    arr = (pred.squeeze().detach().cpu().numpy() * 255).astype(np.uint8)
    im = Image.fromarray(arr).convert('RGB')
    im.save(d_dir + image_name + '.png')


# --------- 1. get image path and name ---------
test_list_txt = "/home/data/wukang/datasets/l8/test_patchs.txt"
prediction_dir = "change to your prediction image saved path"
model_dir = "change to your model saved path"

ensure_dir(prediction_dir)

# One patch path per line; drop newline characters as we read.
with open(test_list_txt, 'r', encoding='utf-8') as list_file:
    test_list = [line.replace("\n", "") for line in list_file]



# --------- 2. dataloader ---------
# Single-sample batches in file order, so batch index i lines up with
# test_list[i] in the inference loop below.
test_transform = transforms.Compose([ToTensorNorm()])
test_salobj_dataset = CloudDataset(
    file_name_list=test_list,
    transform=test_transform,
    test_mode=1,
)
test_salobj_dataloader = DataLoader(
    test_salobj_dataset,
    batch_size=1,
    shuffle=False,
    num_workers=1,
)

# --------- 3. model define ---------
print("...load BoundaryNets...")
# 10 input channels, 1 output channel — presumably spectral bands in,
# cloud-mask probability out; confirm against model.py.
net = BoundaryNets(10, 1)

if torch.cuda.is_available():
    # Wrap in DataParallel when more than one GPU is visible so the
    # checkpoint's parameter names match the wrapped module.
    if len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) > 1:
        net = nn.DataParallel(net)
    net = net.cuda()
# map_location='cpu' lets a GPU-saved checkpoint load on any machine;
# parameters were already moved to the right device by .cuda() above.
checkpoint = torch.load(model_dir, map_location='cpu')
net.load_state_dict(checkpoint['model_state_dict'])
net.eval()

maxval = len(test_salobj_dataloader)
# --------- 4. inference for each image ---------
# batch_size=1 and shuffle=False guarantee i_test indexes test_list in order.
for i_test, data_test in enumerate(test_salobj_dataloader):
    inputs_test = data_test['image']
    # NOTE(review): [10:-4] strips a fixed-length path prefix and the file
    # extension from the patch path — confirm against test_patchs.txt format.
    patch_name = test_list[i_test][10:-4]
    if inputs_test[:, 0, :, :].sum() == 0:
        # First band all zeros -> empty/nodata patch: write a black mask
        # without running the network.
        blank = np.zeros((384, 384))
        im = Image.fromarray(np.uint8(blank)).convert('RGB')
        im.save(prediction_dir + patch_name + '.png')
    else:
        inputs_test = inputs_test.type(torch.FloatTensor)
        if torch.cuda.is_available():
            inputs_test = inputs_test.cuda()

        # no_grad() keeps the forward pass from building an autograd graph,
        # so memory stays flat across the loop; the deprecated Variable
        # wrapper is unnecessary since tensors carry autograd state directly.
        with torch.no_grad():
            d1, d2, d3, d4, d5, d6, d7, d8 = net(inputs_test)

        pred = normPRED(d1[:, 0, :, :])
        save_output(patch_name, pred, prediction_dir)
        del d1, d2, d3, d4, d5, d6, d7, d8
    print("[%d / %d]inferencing %s" % (i_test, maxval, test_list[i_test].split("/")[-1]))
