# -*- coding: UTF-8 -*-
import io
import os
import time
from urllib.request import urlopen

import cv2
import numpy
import numpy as np
from PIL import Image

from opencv_tool.HogGetter import makeHogForDir, getHogFeature2

# Feature-vector dimension; must match the HOG configuration in
# opencv_tool.HogGetter (both training samples and query regions use it).
DIMEN = 144

class ImageTarget:
    """One candidate character region split out of the captcha image.

    Attributes:
        targetImg: cropped binary image of the candidate region
        x: x coordinate where the region starts in the source image,
           used to order the recognised characters left-to-right
        recResult: recognition result string, empty until classification
    """

    def __init__(self, targetImg, x):
        self.targetImg = targetImg
        self.x = x
        self.recResult = ""

    def __repr__(self):
        # targetImg is omitted on purpose: it is a (potentially large) image.
        return f"ImageTarget(x={self.x!r}, recResult={self.recResult!r})"


def split_image_lvfu(img):
    """
    Split the captcha image into candidate character regions.

    :param img: BGR source image; binarized in place by the threshold call
    :return: list of ImageTarget, one per candidate region (unordered)
    """
    cv2.threshold(img, 150, 255, cv2.THRESH_BINARY, img)  # binarize in place
    t_width = img.shape[1]
    t_height = img.shape[0]

    black_img = filter_out(img.copy())  # extract dark pixels as a binary mask

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    black_img = cv2.erode(black_img, kernel)  # erode to strip small noise

    # black_img is already a single-channel 8-bit mask, so it can be passed
    # to findContours directly; the old code round-tripped it through a
    # temp.png file (imwrite -> imread -> cvtColor) for no benefit and left
    # the file behind on disk.
    contours, hierarchy = cv2.findContours(black_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    targets = []

    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        # Candidate region: narrower than the whole image and taller than
        # half of it (filters out the full-image border and tiny specks).
        if w < t_width and h > t_height * 1.0 / 2:
            target = black_img[y:y + h, x:x + w]
            # Keep the crop together with its x start so the caller can
            # restore left-to-right character order.
            targets.append(ImageTarget(target, x))

    return targets


def read_ne_mg_4_opencv(http_path):
    """
    Fetch an image over HTTP and return it as an OpenCV BGR array.

    :param http_path: URL of the image
    :return: numpy BGR image suitable for OpenCV
    """
    # Context manager closes the HTTP response deterministically; the timeout
    # keeps a dead server from hanging the recognition loop forever.
    with urlopen(http_path, timeout=30) as resp:
        image_bytes = resp.read()
    im = Image.open(io.BytesIO(image_bytes))
    # PIL decodes to RGB; OpenCV expects BGR channel order.
    return cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)


def filter_out(src_frame):
    """
    Keep only the dark pixels of a BGR frame.

    Converts to HSV, selects pixels with low "value" (V <= 127), and inverts
    the mask so dark areas come out white (255) and everything else black.

    :param src_frame: BGR image, or None
    :return: inverted single-channel mask, or None when src_frame is None
    """
    if src_frame is None:
        return None
    hsv_frame = cv2.cvtColor(src_frame, cv2.COLOR_BGR2HSV)
    dark_low = np.array([0, 0, 0])
    dark_high = np.array([179, 255, 127])
    dark_mask = cv2.inRange(hsv_frame, dark_low, dark_high)
    return cv2.bitwise_not(dark_mask)


def rec_validate(mat):
    """
    Recognise a captcha image with a 1-NN classifier over HOG features.

    Two-step pipeline: first split the image into candidate character
    regions, then classify each region against the HOG features of the
    training samples under ./sample.

    :param mat: BGR captcha image
    :return: list of ImageTarget with recResult filled in, sorted by each
             region's x start so the characters read left to right
    """
    images = split_image_lvfu(mat)  # step 1: extract candidate regions

    # Build the training set: one HOG vector per sample file.
    vec, sample_files = makeHogForDir("./sample", dimen=DIMEN)
    label_list = []  # numeric labels, one per sample
    label_map = {}   # label -> sample path (ML labels must be numeric)
    for sample_path in sample_files:
        # The file name (without extension) is used as the numeric label.
        label = int(os.path.basename(sample_path).split(".")[0])
        label_list.append(label)
        label_map[label] = sample_path

    # KNN expects one sample per row and labels as an N x 1 float matrix.
    label_sample = np.array(label_list).astype(np.float32).reshape((len(label_list), 1))
    knn = cv2.ml.KNearest_create()
    # ROW_SAMPLE: each row of `vec` is one training sample.
    knn.train(vec, cv2.ml.ROW_SAMPLE, label_sample)

    rec_result = []
    for img in images:
        # Feature dimension must match the training samples exactly.
        newcomer = getHogFeature2(img.targetImg).reshape((1, DIMEN))
        # k=1: the single nearest training sample is the answer.
        _, results, neighbours, dist = knn.findNearest(newcomer, 1)
        # `results` is a 1x1 float matrix; index it explicitly instead of
        # int(results), which modern NumPy rejects for ndim > 0 arrays.
        matched_path = label_map[int(results[0][0])]
        # The sample's parent directory name is the recognised character.
        img.recResult = str(os.path.basename(os.path.dirname(matched_path)))
        rec_result.append(img)

    # Order the recognised characters by their x start position.
    rec_result.sort(key=lambda t: t.x)
    return rec_result


if __name__ == "__main__":

    path = "./test/test1.png"

    while True:
        start = time.time()
        # Remote URLs go through the HTTP/PIL reader; local files are read
        # directly with OpenCV.
        if path.startswith("http"):
            img = read_ne_mg_4_opencv(path)
        else:
            img = cv2.imread(path)

        cv2.imshow("img", img)

        rets = rec_validate(img)

        # Results are already sorted left-to-right by rec_validate.
        for ret in rets:
            print(ret.recResult)
        end = time.time()
        print("cost:" + str(end - start))

        cv2.imshow("验证码", img)

        # ESC (27) quits; a local file is only processed once, while an
        # HTTP source keeps looping so a fresh captcha can be fetched.
        if 27 == cv2.waitKey(1000 * 3):
            exit(0)
        if not path.startswith("http"):
            exit(0)
