import numpy as np
from ai.Lenet5 import LeNet5
import torch
import cv2
from pathlib import Path
import sys

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# LeNet-5 network used for digit recognition.
model = LeNet5()

# Collects every pre-processed 28x28 binary crop
# (appended to by ImageProcessing.image_recognize).
img_end_list = []

# Weight file resolved relative to this source file.
mode_path = Path(__file__).resolve().parent / "model_data" / "LeNet5.pth"
if not mode_path.exists():
    print("模型文件不存在")  # "model file does not exist"
    # Exit with a non-zero status so callers/shells can detect the failure
    # (bare sys.exit() would report success).
    sys.exit(1)

# Load the trained weights directly onto the chosen device.
model.load_state_dict(torch.load(mode_path, map_location=device))

# Inference mode: freezes dropout / batch-norm behavior.
model.eval()
model.to(device)

class ImageProcessing:
    """
    Image-processing pipeline for the digit-recognition task.

    Locates yellow blocks in a 640x480 BGR frame, sorts them into screen
    quadrants, binarizes each crop to 28x28, and classifies the digit with
    the module-level LeNet-5 model.
    """

    def __init__(self):
        """Set up the HSV yellow range and the 28x28 border mask."""
        # HSV bounds for "yellow" (OpenCV ranges: H 0-180, S/V 0-255).
        self.lower_yellow = np.array([20, 43, 46])
        self.upper_yellow = np.array([40, 255, 255])
        # Mask that keeps only the central region of a 28x28 crop,
        # zeroing the border so edge artifacts never reach the model.
        self.edge_img = np.zeros((28, 28, 1), np.uint8)
        self.edge_img[3: 25, 6: 23] = 255

    def image_position(self, image):
        """
        Locate the yellow blocks in the full image.

        :param image: original BGR image
        :return: (opened binary mask,
                  list of block positions as [[y1, y2, x1, x2], ...])
        """
        image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # White inside the yellow range, black elsewhere.
        image_thresh = cv2.inRange(image_hsv, self.lower_yellow, self.upper_yellow)
        # Median filter kills salt-and-pepper speckle in the mask.
        image_blur = cv2.medianBlur(image_thresh, 7)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        # Morphological opening removes the remaining small foreground noise.
        image_open = cv2.morphologyEx(image_blur, cv2.MORPH_OPEN, kernel, iterations=1)
        # External contours only; every sufficiently large one is a block.
        contours, _ = cv2.findContours(image_open, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cargo_location = []
        for cnt in contours:
            # Reject small regions (frame is 640x480; blocks are large).
            if cv2.contourArea(cnt) < 10000:
                continue
            # Minimal-area rotated rectangle around the contour.
            rect = cv2.minAreaRect(cnt)
            # Corner coordinates, rounded to int and clamped to >= 0.
            # NOTE: np.int0 was removed in NumPy 2.0; cast explicitly instead.
            box = np.maximum(cv2.boxPoints(rect).astype(np.intp), 0)
            # Axis-aligned bounding box of the four (possibly rotated) corners.
            y1, y2 = int(box[:, 1].min()), int(box[:, 1].max())
            x1, x2 = int(box[:, 0].min()), int(box[:, 0].max())
            cargo_location.append([y1, y2, x1, x2])
        return image_open, cargo_location

    def image_sort(self, cargo_location):
        """
        Assign each detected block to a screen quadrant.

        The 640x480 frame is split at y=240 and x=320:
        index 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.
        A center lying exactly on a split line goes to the bottom/right side
        (the original strict comparisons silently dropped such blocks).

        :param cargo_location: list of [y1, y2, x1, x2] block positions
        :return: list of four slots, each the position in that quadrant or []
        """
        cargo_location_sort = [[], [], [], []]
        for y1, y2, x1, x2 in cargo_location:
            row = 0 if (y1 + y2) / 2 < 240 else 1
            col = 0 if (x1 + x2) / 2 < 320 else 1
            # Later blocks in the same quadrant overwrite earlier ones,
            # matching the original behavior.
            cargo_location_sort[2 * row + col] = [y1, y2, x1, x2]
        return cargo_location_sort

    def edge_processing(self, img):
        """
        Binarize a cropped block for LeNet-5 input.

        (This method was previously defined twice, byte-identical; the
        duplicate definition has been removed.)

        :param img: cropped BGR image of one block
        :return: 28x28 binary image with the border masked to zero
        """
        img = cv2.resize(img, (28, 28), interpolation=cv2.INTER_CUBIC)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Otsu threshold, inverted so the digit is white on black.
        _, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        # Zero the border via the precomputed mask.
        return cv2.bitwise_and(img_thresh, self.edge_img, dst=None, mask=None)

    def image_recognize(self, cargo_location_sort, image):
        """
        Recognize the digit inside each located yellow block.

        :param cargo_location_sort: quadrant-sorted positions from image_sort
        :param image: original BGR image
        :return: dict mapping quadrant index -> predicted digit
        """
        location_result = {}
        for i, loc in enumerate(cargo_location_sort):
            if not loc:
                continue
            y1, y2, x1, x2 = loc
            img_end = self.edge_processing(image[y1: y2, x1: x2])
            # Module-level list keeps every processed crop for inspection.
            img_end_list.append(img_end)

            # Scale to [0, 1] and add batch + channel dims: (1, 1, 28, 28).
            img_binary_np = img_end.astype(float) / 255.0
            img_binary_tensor = (
                torch.tensor(img_binary_np, dtype=torch.float32)
                .unsqueeze(0)
                .unsqueeze(0)
                .to(device)
            )
            # Inference only: skip autograd graph construction.
            with torch.no_grad():
                outputs = model(img_binary_tensor)
            _, predicted = torch.max(outputs.data, dim=1)
            location_result[i] = predicted.item()
        return location_result


# image_processing = ImageProcessing()
# # image = cv2.imread("./pic/low_light.jpg")
# # image = cv2.imread("./pic/normal.jpg")
# image = cv2.imread("../pic/glare.jpg")
# # image = cv2.imread("./pic/sun.jpg")
# image_open, cargo_location = image_processing.image_position(image)
# # print(cargo_location)
# cargo_location_sort = image_processing.image_sort(cargo_location)
# # print(cargo_location_sort)
# location_result = image_processing.image_recognize(cargo_location_sort, image)
# print(location_result)
# # cv2.imshow("image", image)
# # cv2.waitKey(0)
#
