# coding: utf-8
import torch
import os
import cv2
import random
import numpy as np
from collections import OrderedDict
from pylab import plt
from models.yolov3 import yolov3
from models.yololayer import YOLOLayer
from utils.utils import non_max_suppression, load_classes, load_weights
from PIL import Image

# --- Device selection -------------------------------------------------------
# CUDA is disabled here; flip USE_CUDA to run on the GPUs listed in GPU_ID.
USE_CUDA = False
GPU_ID = [0]
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in GPU_ID)
use_gpu = USE_CUDA and torch.cuda.is_available()
device = torch.device("cuda:0") if use_gpu else torch.device("cpu")

# --- Paths and thresholds ---------------------------------------------------
# Checkpoint, dataset root, and the label-list file (one "path|..." per line).
cp_path = r"C:\Users\ZiGAA\Documents\model\yolov3_iter_1700000.pkl"
root_path = r"E:\zhigan\detection\data"
image_list = r"E:\zhigan\detection\labels.txt"

# Alternate dataset (kept for quick switching):
# cp_path = r"C:\Users\ZiGAA\Documents\model\yolov3_gui_400000.pkl"
# root_path = r"E:\BaiduNetdiskDownload\duojinggui\val"
# image_list = r"E:\BaiduNetdiskDownload\duojinggui\val\files.txt"

# NOTE(review): num_classes is 1 but `classes` lists three names — confirm
# which reflects the trained checkpoint.
num_classes = 1
classes = ["1", "2", "3"]

conf_thresh = 0.8   # minimum objectness/confidence to keep a detection
nms_thresh = 0.35   # IoU threshold for non-max suppression

# --- Anchors ----------------------------------------------------------------
# Each line of the file is "w,h" in pixels at the 416x416 training
# resolution; rescale to this script's 256x256 input size.
with open("config/coco_anchors.txt") as anchor_file:
    anchors = []
    for line in anchor_file:
        fields = line.split(',')
        w, h = int(fields[0]), int(fields[1])
        anchors.append([w / 416 * 256, h / 416 * 256])

# --- Model ------------------------------------------------------------------
# Backbone plus one YOLO detection head per scale; anchors are consumed
# three at a time (indices 0-2, 3-5, 6-8).
net = yolov3(num_classes)
head = [YOLOLayer(anchors[start:start + 3], num_classes)
        for start in range(0, 9, 3)]

# Restore trained weights, then put everything in inference mode on the
# selected device. (.to() and .eval() both return the module itself.)
net = load_weights(cp_path, net)
net.to(device)
net.eval()

head = [layer.to(device).eval() for layer in head]

MEAN = 127.5  # maps uint8 pixels [0, 255] to roughly [-1, 1]

# Pick one sample from the label list; each line is "relative_path|...".
with open(image_list) as f:
    lines = f.readlines()
image_path = lines[12].strip().split('|')[0]
image_path = os.path.join(root_path, image_path)

# image = cv2.imread(image_path, 1)
image = np.array(Image.open(image_path))
# BUG FIX: the original test was `image.shape != 3`, which compares a tuple
# to an int and is therefore always True — cvtColor(GRAY2BGR) then ran on
# every image and raises on 3-channel input. Check dimensionality instead,
# so only true grayscale (H, W) images are expanded to 3 channels.
if image.ndim != 3:
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
image = cv2.resize(image, (256, 256))

origin_image = image.copy()  # keep an uint8 copy for drawing/display
image = image.astype(np.float32)

# Normalize to roughly [-1, 1].
image -= MEAN
image /= MEAN

# HWC -> NCHW float tensor on the inference device.
input_tensor = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0)
input_tensor = input_tensor.to(device)

# --- Inference --------------------------------------------------------------
with torch.no_grad():
    y1, y2, y3 = net(input_tensor)
    # NOTE(review): head[2] (anchors 6-8) decodes y1 while head[0]
    # (anchors 0-2) decodes y3 — confirm this matches the scale ordering
    # of the backbone's three outputs.
    o1, _ = head[2](y1, img_dim=256)
    o2, _ = head[1](y2, img_dim=256)
    o3, _ = head[0](y3, img_dim=256)
    output = torch.cat((o1, o2, o3), 1)
    output = non_max_suppression(output, conf_thresh, nms_thresh)

# Detections for the single image in the batch.
output = output[0]

# --- Visualization ----------------------------------------------------------
tmp = origin_image.copy()
# ROBUSTNESS: common NMS implementations return None for images with no
# surviving detections; the original iterated unconditionally and crashed.
if output is None:
    print("no detections above conf_thresh=%s" % conf_thresh)
else:
    # Each row is (x1, y1, x2, y2, conf, cls_conf, cls_pred).
    for x1, y1, x2, y2, conf, cls_conf, cls_pred in output:
        tmp = cv2.rectangle(tmp, (int(x1), int(y1)), (int(x2), int(y2)),
                            (0, 255, 0), 1)

print(output)
# NOTE(review): the [:, :, ::-1] flip assumes BGR channel order, but the
# image was loaded via PIL (RGB) — verify the intended display order.
plt.figure(), plt.imshow(tmp[:, :, ::-1]), plt.show()

print("done")
