import warnings
warnings.filterwarnings('ignore')
import os
import cv2

import numpy as np
import torch
import glob
from model import edge_model
from torchvision import transforms
import time
from PIL import Image
from collections import OrderedDict
from matplotlib import pyplot as plt

def get_eval_mask(image_path, seg_model):
    """Load an image, preprocess it to the network input size, and run
    the segmentation model on it.

    NOTE: relies on the module-level global ``trans`` (torchvision
    transform pipeline) being defined before the first call, and on
    CUDA being available.

    Args:
        image_path: Path to an image readable by OpenCV.
        seg_model: Segmentation network; called with a single-image
            batch tensor of shape (1, 3, 544, 960) on the GPU.

    Returns:
        The raw model output (logits) for the one-image batch.

    Raises:
        FileNotFoundError: If OpenCV cannot read ``image_path``.
    """
    img = cv2.imread(image_path)
    if img is None:
        # cv2.imread silently returns None on failure; fail loudly here
        # instead of crashing with a cryptic error inside cv2.resize.
        raise FileNotFoundError('Could not read image: %s' % image_path)
    # Fixed network input size: width 960, height 544.
    img = cv2.resize(img, dsize=(960, 544))
    # OpenCV loads BGR; the model expects RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    batch = Image.fromarray(img)
    batch = trans(batch)
    batch = torch.unsqueeze(batch, 0).cuda()
    return seg_model(batch)



# Build the segmentation network (9 output classes), load trained
# weights (CPU-deserialized, then moved to GPU), and switch to eval mode.
model_sp = edge_model.SegUNet(9)
model_sp.eval()
model_sp.load_state_dict(
    torch.load('F:/edge_model_1219_544att_304.pth', map_location='cpu'),
    strict=True)
model_sp.cuda()

# Collect evaluation images and visit them in random order.
images_path = glob.glob('F:/1225/JPEGImages/*.jpg')
np.random.shuffle(images_path)

# Preprocessing: only HWC uint8 -> CHW float tensor in [0, 1];
# no mean/std normalization is applied.
trans = transforms.Compose([
    transforms.ToTensor(),
])
# Visualize model predictions for each image, one matplotlib figure at a
# time (plt.show() blocks until the window is closed).
with torch.no_grad():
    for ip in images_path:
        # BUG FIX: a debug leftover here ('ip = F:/3358462_122.jpg')
        # overwrote the loop variable every iteration, so the shuffled
        # image list was never actually used. Removed.
        print(ip)
        # Read and preprocess a copy for display only; get_eval_mask
        # re-reads and preprocesses the same file for the model.
        img = cv2.imread(ip)
        img = cv2.resize(img, dsize=(960, 544))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        out = get_eval_mask(ip, model_sp)

        # Per-channel sigmoid of the logits for the single image in the
        # batch. (A softmax was previously computed here too but never
        # used — removed as dead code.)
        out = torch.sigmoid(out)[0].cpu().numpy()

        # 2x2 figure: input image, two individual class maps
        # (channels 6 and 1), and the argmax class mask.
        plt.subplot(221)
        plt.imshow(img)
        plt.subplot(222)
        plt.imshow(out[6])
        plt.subplot(223)
        plt.imshow(out[1])
        plt.subplot(224)
        plt.imshow(np.argmax(out, axis=0))
        plt.show()
