import cv2
import numpy as np
import os
import random
from numpy.linalg import norm
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt

SZ = 20  # side length (pixels) of the square glyph images fed to the SVM
PROVINCE_START = 1000  # label offset separating Chinese-province labels from ASCII labels
# Alternating (pinyin directory name, province abbreviation character).
# Not guaranteed to cover every province.
provinces = [
    "zh_cuan", "川",
    "zh_e", "鄂",
    "zh_gan", "赣",
    "zh_gan1", "甘",
    "zh_gui", "贵",
    "zh_gui1", "桂",
    "zh_hei", "黑",
    "zh_hu", "沪",
    "zh_ji", "冀",
    "zh_jin", "津",
    "zh_jing", "京",
    "zh_jl", "吉",
    "zh_liao", "辽",
    "zh_lu", "鲁",
    "zh_meng", "蒙",
    "zh_min", "闽",
    "zh_ning", "宁",
    "zh_qing", "靑",
    "zh_qiong", "琼",
    "zh_shan", "陕",
    "zh_su", "苏",
    "zh_sx", "晋",
    "zh_wan", "皖",
    "zh_xiang", "湘",
    "zh_xin", "新",
    "zh_yu", "豫",
    "zh_yu1", "渝",
    "zh_yue", "粤",
    "zh_yun", "云",
    "zh_zang", "藏",
    "zh_zhe", "浙"
]



def use_mask(img):
    """Return a binary mask selecting the blue regions of a BGR image.

    Pixels whose HSV value falls inside the blue range become 255 and all
    others 0, so the mask can later be AND-ed with the original picture to
    keep only the blue parts (e.g. a blue licence plate) and discard the
    rest.
    """
    # HSV bounds of "blue" (hue 100-124)
    blue_low = np.array([100, 43, 46])
    blue_high = np.array([124, 255, 255])
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv_img, lowerb=blue_low, upperb=blue_high)


def save_img(imgs, titles, path):
    """Plot the first three images (grayscale) in a 2x2 grid and save the
    figure to ``./static/<path>``, then clear the current axes/figure."""
    for slot in range(3):
        plt.subplot(221 + slot)
        plt.imshow(imgs[slot], "gray")
        plt.title(titles[slot])
    plt.savefig("./static/" + path)
    plt.cla()
    plt.clf()


def deskew(img):
    """Straighten a slanted SZ x SZ character image using image moments.

    If the second-order central moment mu02 is negligible the glyph is
    treated as already upright and a copy is returned; otherwise an affine
    shear derived from mu11/mu02 is applied to de-slant it.
    """
    moments = cv2.moments(img)
    # nothing to correct when there is essentially no vertical variance
    if abs(moments['mu02']) < 1e-2:
        return img.copy()
    # slant ratio used to build the shear matrix
    slant = moments['mu11'] / moments['mu02']
    shear = np.float32([[1, slant, -0.5 * SZ * slant],
                        [0, 1, 0]])
    # warp back to an SZ x SZ upright glyph
    return cv2.warpAffine(img, shear, (SZ, SZ),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)


# 来自opencv的sample，用于svm训练
def preprocess_hog(digits):
    samples = []
    for img in digits:
        # 寻找图形边缘
        # 垂直
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        # 水平
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        # 直角坐标系转极坐标系
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n * ang / (2 * np.pi))
        # 元组
        bin_cells = bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:]
        mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
        # 遍历bin_cells 和 mag_cells，将其每一个都有转化为列表
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
        # print("训练svm中所有图形转换")
        # print(np.float32(samples))
    # print("图片转换结束")
    return np.float32(samples)


class StatModel(object):
    """Thin persistence wrapper around a cv2.ml SVM model."""

    def load(self, fn):
        # Load a previously trained SVM from file `fn` into self.model.
        self.model = cv2.ml.SVM_load(fn)

    def save(self, fn):
        # Persist the trained model to file `fn`.
        self.model.save(fn)


class SVM(StatModel):
    """C-SVC support-vector machine with an RBF kernel (cv2.ml backend)."""

    def __init__(self, C=1, gamma=0.5):
        # C is the error-penalty term: a large C tolerates little training
        # error (risking overfit) while a small C underfits; either extreme
        # hurts generalisation.  gamma is the RBF kernel parameter that
        # implicitly shapes the data's distribution in the mapped feature
        # space: larger gamma means fewer support vectors, smaller gamma
        # means more, which affects training and prediction speed.
        svm = cv2.ml.SVM_create()
        svm.setType(cv2.ml.SVM_C_SVC)
        # RBF kernel maps samples into a high-dimensional space
        svm.setKernel(cv2.ml.SVM_RBF)
        svm.setC(C)
        svm.setGamma(gamma)
        self.model = svm

    def train(self, samples, responses):
        """Fit the SVM; one sample per row of `samples`."""
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    def predict(self, samples):
        """Return the predicted label for each row of `samples`."""
        _, labels = self.model.predict(samples)
        return labels.ravel()


class train2use_SVM():
    """Train (or load cached) SVM models for licence-plate character recognition.

    Two models are managed: ``self.model`` for latin letters/digits and
    ``self.modelchinese`` for Chinese province characters.  When a cached
    ``*.dat`` file exists under ./static/svm/ it is loaded directly;
    otherwise the corresponding training-image tree is walked, each glyph
    is deskewed, converted to a HOG descriptor and used to fit the model.
    Freshly trained models are written back to disk in ``__del__``.
    """

    @staticmethod
    def _collect(train_dir, label_of_dir):
        """Walk `train_dir` and return (hog_samples, labels) for SVM.train.

        `label_of_dir` maps an inner directory name to an integer class
        label, or None when that directory should be skipped (e.g. the
        outer container directory).
        """
        imgs = []
        labels = []
        for root, dirs, files in os.walk(train_dir):
            label = label_of_dir(os.path.basename(root))
            if label is None:
                continue
            for filename in files:
                digit_img = cv2.imread(os.path.join(root, filename))
                # unreadable / non-image file: skip instead of crashing in
                # cvtColor (cv2.imread returns None on failure)
                if digit_img is None:
                    continue
                imgs.append(cv2.cvtColor(digit_img, cv2.COLOR_BGR2GRAY))
                labels.append(label)
        # deskew (normalise slant) then convert every glyph to a HOG descriptor
        samples = preprocess_hog(list(map(deskew, imgs)))
        return samples, np.array(labels)

    def __init__(self):
        # model for latin letters and digits
        self.model = SVM(C=1, gamma=0.5)
        # model for Chinese province characters
        self.modelchinese = SVM(C=1, gamma=0.5)

        # Load the cached letters/digits model if present, otherwise train it.
        if os.path.exists("./static/svm/svm.dat"):
            self.model.load("./static/svm/svm.dat")
        else:
            # Inner directories are single characters ('0'..'9', 'A'..'Z');
            # the class label is that character's ASCII code.  Longer names
            # are container directories and are skipped.
            samples, labels = self._collect(
                "./static/train/chars2",
                lambda name: ord(name) if len(name) == 1 else None)
            self.model.train(samples, labels)

        # Load the cached Chinese-character model if present, otherwise train it.
        if os.path.exists("./static/svm/svmchinese.dat"):
            self.modelchinese.load("./static/svm/svmchinese.dat")
        else:
            # Inner directories are pinyin names ("zh_...").  The label is the
            # index of the matching entry in `provinces` plus 1 (moving from
            # the pinyin entry to the hanzi entry) offset by PROVINCE_START.
            samples, labels = self._collect(
                "./static/imgs/train/charsChinese",
                lambda name: (provinces.index(name) + PROVINCE_START + 1)
                if name.startswith("zh_") else None)
            self.modelchinese.train(samples, labels)

    def __del__(self):
        # Persist freshly trained models so the next run can simply load them.
        if not os.path.exists("./static/svm/svm.dat"):
            self.model.save("./static/svm/svm.dat")
        if not os.path.exists("./static/svm/svmchinese.dat"):
            self.modelchinese.save("./static/svm/svmchinese.dat")


class operate_img():
    """Image-processing pipeline that locates a (blue) Chinese licence plate,
    splits it into characters and recognises them with the SVM models.

    Every intermediate step is written as an image under ./static/ and its
    relative path returned, so a web front end can display the stages.
    """

    # Load the image data needed for the requested processing stage.
    def __init__(self, file, file_ter=None, load_which='origin'):
        # NOTE(review): 'ternimal' is a typo of 'terminal', but it is part of
        # the caller-facing interface (mode string and attribute name), so it
        # is kept as-is.
        if load_which == 'origin':
            self.img = cv2.imread("." + file, cv2.IMREAD_COLOR)
        elif load_which == 'ternimal':
            self.img = cv2.imread("." + file, cv2.IMREAD_COLOR)
            self.ternimal = cv2.imread("." + file_ter, cv2.IMREAD_GRAYSCALE)
        elif load_which == 'wordsplit':
            # here `file` is expected to be an iterable of plate-image paths
            self.final_num_plate_path = file

    # Grayscale conversion + Gaussian blur; lets the UI compare binarisation
    # results under different Gaussian kernel sizes.
    def gaussian_blur(self, kernel_size=5):
        self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.gaussian = cv2.GaussianBlur(self.gray, (kernel_size, kernel_size), 0)
        file_path = "imgs/process/picture_gaussian" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path, self.gaussian)
        return file_path

    # Optional median filtering on top of the Gaussian-blurred image.
    def median_blur(self, kernel_size=5, use_median="true"):
        # NOTE(review): the trailing `or use_median` makes this condition true
        # for ANY truthy value, including the string "false" — confirm intent.
        if use_median == "true" or use_median == "True" or use_median:
            self.median = cv2.medianBlur(self.gaussian, kernel_size)
            file_path = "imgs/process/picture_median" + str(random.randint(0, 50)) + ".jpg"
            cv2.imwrite("./static/" + file_path, self.median)
            return file_path
        else:
            self.median = self.gaussian
            return "imgs/main/nl.jpg"

    # Edge detection with the Sobel operator (vertical or horizontal edges).
    def sobel_blur(self, kernel_size=3, is_vertical="true"):
        if is_vertical == "true" or is_vertical == "True":
            self.sbl = cv2.Sobel(self.median, cv2.CV_8U, 1, 0, ksize=kernel_size)
        else:
            self.sbl = cv2.Sobel(self.median, cv2.CV_8U, 0, 1, ksize=kernel_size)
        file_path = "imgs/process/picture_sobel" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path, self.sbl)
        return file_path

    # Binarisation.
    def threshold_img(self, threshold_value=170, is_otsu="true"):
        # NOTE(review): the branches look swapped — when is_otsu is truthy a
        # plain fixed threshold is applied, and OTSU only runs in the else
        # branch, which is unreachable for truthy values because of the
        # trailing `or is_otsu`.  Confirm intent.
        if is_otsu == "true" or is_otsu == "True" or is_otsu:
            ret2, self.th2 = cv2.threshold(self.sbl, threshold_value, 255, cv2.THRESH_BINARY)
        else:
            ret2, self.th2 = cv2.threshold(self.sbl, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        file_path = "imgs/process/picture_threshold" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path, self.th2)
        return file_path

    # Dilation (preceded by a small fixed erosion to remove noise specks).
    def dilate_img(self, dilate_kernel_x, dilate_kernel_y, dilate_times):
        ele = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
        self.th2 = cv2.erode(self.th2, ele, iterations=2)
        ele = cv2.getStructuringElement(cv2.MORPH_RECT, (dilate_kernel_x, dilate_kernel_y))
        self.dilation = cv2.dilate(self.th2, ele, iterations=dilate_times)
        file_path = "imgs/process/picture_dilate" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path, self.dilation)
        return file_path

    # Erosion (followed by a small fixed dilation to reconnect the plate region).
    def erode_img(self, erode_kernel_x, erode_kernel_y, erode_times):
        ele = cv2.getStructuringElement(cv2.MORPH_RECT, (erode_kernel_x, erode_kernel_y))
        erosion = cv2.erode(self.dilation, ele, iterations=erode_times)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        self.ternimal = cv2.dilate(erosion, kernel, iterations=2)
        file_path = "imgs/process/picture_erode" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path, self.ternimal)
        return file_path

    # Draw contours and min-area rectangles; keep rectangles whose aspect
    # ratio is plausible for a licence plate.
    def find_counters(self, min_specific=2.7, max_specific=5):
        # NOTE(review): indexing [1] assumes OpenCV 3.x, where findContours
        # returns (image, contours, hierarchy); on 4.x element [1] is the
        # hierarchy.  [-2] would work on both versions — confirm the target
        # OpenCV version.
        contours = cv2.findContours(self.ternimal, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
        img_cp = self.img.copy()
        img_cp1 = self.img.copy()
        # candidate plate positions
        self.np_position = []
        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)
            # draw the raw contour outline
            img_cp = cv2.drawContours(img_cp, contours, i, (0, 255, 0), 1)
            # a plate's aspect ratio is usually 2.7-5; regions with a small
            # area (roughly under 100px wide / 40px tall) cannot be a plate
            if area < 3000:
                continue
            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box1 = np.int0(box)
            if float(rect[1][0]) > float(rect[1][1]):
                ratio = float(rect[1][0]) / float(rect[1][1])
            else:
                ratio = float(rect[1][1]) / float(rect[1][0])
            # drop regions whose width/height ratio is implausible
            if ratio > max_specific or ratio < min_specific:
                continue
            # draw the accepted min-area rectangle
            img_cp1 = cv2.drawContours(img_cp1, [box1], 0, (0, 255, 0), 2)
            # remember this candidate plate position
            self.np_position.append(box1)

        file_path_counters = "imgs/process/picture_counters" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path_counters, img_cp)
        file_path_rect = "imgs/process/picture_rect" + str(random.randint(0, 50)) + ".jpg"
        cv2.imwrite("./static/" + file_path_rect, img_cp1)
        return file_path_counters, file_path_rect

    # Filter out the real plate from the candidates and partially rectify it.
    def unify_possible_pos(self):
        all_num_plate = []
        final_num_plate = []
        for box in self.np_position:
            # rotate every candidate to horizontal and stretch it to a
            # fixed-size rectangle
            b1 = box[1] - box[0]
            b2 = box[1] - box[2]

            # Manhattan lengths of two adjacent sides, used to decide which
            # corner ordering gives the long side as the width
            l1 = abs(b1[0]) + abs(b1[1])
            l2 = abs(b2[0]) + abs(b2[1])
            if l1 > l2:
                f = np.float32([box[2], box[3], box[1], box[0]])
            else:
                f = np.float32([box[1], box[2], box[0], box[3]])
            # normalise the candidate plate region to a 200x60 rectangle
            t = np.float32([[0, 0], [200, 0], [0, 60], [200, 60]])
            M = cv2.getPerspectiveTransform(f, t)
            # warped image of this candidate plate
            dst = cv2.warpPerspective(self.img, M, (200, 60))
            # use colour to decide whether this is really a plate
            masked_img = use_mask(dst)

            # Re-localise the plate from the blue mask: dilate/erode the mask,
            # take min-area rectangles of its contours, and accept regions with
            # a plate-like aspect ratio.

            ele_mask1 = cv2.getStructuringElement(cv2.MORPH_RECT, (6, 6))
            dilation_mask = cv2.dilate(masked_img, ele_mask1, iterations=2)

            ele_mask2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
            erosion_mask = cv2.erode(dilation_mask, ele_mask2, iterations=2)

            # NOTE(review): [1] again assumes OpenCV 3.x (see find_counters).
            contours = cv2.findContours(erosion_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
            for i in range(len(contours)):
                cnt = contours[i]
                area = cv2.contourArea(cnt)
                if area < 2000:
                    continue
                rect = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(rect)
                box2 = np.int0(box)

                if float(rect[1][0]) > float(rect[1][1]):
                    ratio = float(rect[1][0]) / float(rect[1][1])
                else:
                    ratio = float(rect[1][1]) / float(rect[1][0])
                if ratio > 4.1 or ratio < 2.5:
                    continue
                # rotate to horizontal and stretch to the standard size
                b1 = box2[1] - box2[0]
                b2 = box2[1] - box2[2]
                if abs(b1[0]) + abs(b1[1]) > abs(b2[0]) + abs(b2[1]):
                    f = np.float32([box2[2], box2[3], box2[1], box2[0]])
                else:
                    f = np.float32([box2[1], box2[2], box2[0], box2[3]])
                # extract the refined plate region
                t = np.float32([[0, 0], [200, 0], [0, 60], [200, 60]])
                M = cv2.getPerspectiveTransform(f, t)
                # the filtered (final) plate image
                dst1 = cv2.warpPerspective(dst, M, (200, 60))
                file_path_final = "imgs/process/picture_rect_final" + str(random.randint(0, 50)) + ".jpg"
                cv2.imwrite("./static/" + file_path_final, dst1)
                final_num_plate.append(file_path_final)

            file_path_all = "imgs/process/picture_rect_all" + str(random.randint(0, 50)) + ".jpg"
            cv2.imwrite("./static/" + file_path_all, dst)
            all_num_plate.append(file_path_all)
        return all_num_plate, final_num_plate

    # Character segmentation.
    def word_split(self):
        split_process_img = []
        self.split_result = []
        for img_path in self.final_num_plate_path:
            img = cv2.imread("." + img_path)
            if img is None:
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Gaussian blur
            gaussian = cv2.GaussianBlur(gray, (5, 5), 0)
            # median filter
            median = cv2.medianBlur(gaussian, 3)
            # Sobel edges: x-derivative and y-derivative
            sbl_h = cv2.Sobel(median, cv2.CV_8U, 1, 0, ksize=3)
            sbl_v = cv2.Sobel(median, cv2.CV_8U, 0, 1, ksize=3)
            # combine both edge directions
            sbl = cv2.add(sbl_h, sbl_v)
            # NOTE(review): the original comment claimed OTSU, but this is a
            # plain fixed threshold at 127 — confirm which was intended.
            th2 = cv2.threshold(sbl, 127, 255, cv2.THRESH_BINARY)[1]
            # morphological close-like sequence to consolidate the glyphs
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 2))
            kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
            ero = cv2.erode(th2, kernel1, iterations=1)
            dil = cv2.dilate(ero, kernel, iterations=2)
            ero = cv2.erode(dil, kernel1, iterations=2)

            dil = cv2.dilate(ero, kernel1, iterations=2)
            ero = cv2.erode(dil, kernel1, iterations=3)

            # save the analysis-stage images
            pre_name_a = "imgs/wordsplit/pre_a" + str(random.randint(0, 50)) + ".png"
            split_process_img.append(pre_name_a)
            save_img([gray, gaussian, median], ["gray", "gaussian", "median"], pre_name_a)

            pre_name_b = "imgs/wordsplit/pre_b" + str(random.randint(0, 50)) + ".png"
            split_process_img.append(pre_name_b)
            save_img([sbl_v, sbl_h, sbl], ["vertical", "horizational", "mixed"], pre_name_b)

            pre_name_c = "imgs/wordsplit/pre_c" + str(random.randint(0, 50)) + ".png"
            split_process_img.append(pre_name_c)
            save_img([th2, dil, ero], ["OTSU", "dilate", "erode"], pre_name_c)

            # plate image dimensions
            height, width = ero.shape
            white_h = []  # white-pixel count of each column
            black_h = []  # black-pixel count of each column
            white_h_max = 0
            black_h_max = 0
            # column-wise (vertical projection) pixel statistics
            for i in range(width):
                s = 0  # white pixels in this column
                t = 0  # black pixels in this column
                for j in range(height):
                    if ero[j][i] == 255:
                        s += 1
                    if ero[j][i] == 0:
                        t += 1
                white_h_max = max(white_h_max, s)
                black_h_max = max(black_h_max, t)
                white_h.append(s)
                black_h.append(t)

            white_v = []  # white-pixel count of each row
            black_v = []  # black-pixel count of each row
            white_v_max = 0
            black_v_max = 0
            # row-wise (horizontal projection) pixel statistics
            for i in range(height):
                s = 0  # white pixels in this row
                t = 0  # black pixels in this row
                for j in range(width):
                    if ero[i][j] == 255:
                        s += 1
                    if ero[i][j] == 0:
                        t += 1
                white_v_max = max(white_v_max, s)
                black_v_max = max(black_v_max, t)
                white_v.append(s)
                black_v.append(t)

            # x axes for the projection bar charts
            x_h = np.arange(width)
            x_v = np.arange(height)
            # plot the white-pixel projections for the front end
            # f_analyze = plt.figure("analyze")
            # f_analyze.clear()
            plt.subplot(211), plt.bar(x_h, white_h, 1), plt.title("vertial white pixel")
            plt.subplot(212), plt.bar(x_v, white_v, 1), plt.title("horizational white pixel")
            plt.tight_layout()
            analyze_pixel = "imgs/wordsplit/analyzePixel" + str(random.randint(0, 50)) + ".png"
            split_process_img.append(analyze_pixel)
            plt.savefig("./static/" + analyze_pixel)
            plt.cla()
            plt.clf()

            # Find character segments along the width by accumulating columns
            # that contain white pixels.
            # Tolerance: allow up to 2 consecutive empty columns inside a
            # character.
            sub_h = []  # temp buffer of column indices of the current character
            pos_h = []  # recorded [start, end] column spans of characters
            flag_h = 0  # counter of consecutive empty columns tolerated
            prev_h = 0  # previous column's white count
            flag_abnormal = False  # previous segment was closed abnormally
            for i in range(width):
                if white_h[i] > 0:
                    sub_h.append(i)

                    if i == width - 1:
                        if len(sub_h) > 9:
                            pos_h.append([sub_h[0], sub_h[-1]])
                        break
                    elif len(sub_h) > 23:
                        # over 23 columns buffered: look for a local minimum
                        # to split two merged characters
                        if prev_h <= white_h[i] or len(sub_h) > width / 8:
                            pos_h.append([sub_h[0], sub_h[-1]])
                            sub_h = []
                            flag_abnormal = True
                    if flag_abnormal:
                        if prev_h < white_h[i]:
                            flag_abnormal = False
                        elif len(sub_h):
                            sub_h.pop()
                    prev_h = white_h[i]
                else:
                    if len(sub_h) > width / 18:
                        # wide enough to be considered one character
                        pos_h.append([sub_h[0], sub_h[-1]])
                        sub_h = []
                    elif len(sub_h) > 4 and flag_h < 3:  # >4 columns buffered: tolerate up to 2 gaps
                        sub_h.append(i)
                        flag_h += 1
                    else:
                        flag_h = 0
                        sub_h = []
            # Find the minimal row span containing the characters.
            sub_v = []  # temp buffer of row indices of the text band
            pos_v = []  # recorded [start, end] row spans
            flag_v = 0  # counter of consecutive empty rows tolerated
            for i in range(height):
                if white_v[i] > 0:
                    sub_v.append(i)
                    if i == height - 1:
                        pos_v.append([sub_v[0], sub_v[-1]])
                        break
                else:
                    if len(sub_v) > height / 2:
                        pos_v.append([sub_v[0], sub_v[-1]])
                    elif len(sub_v) > height / 30 and flag_v < 3:
                        sub_v.append(i)
                        flag_v += 1
                    else:
                        sub_v = []
                        flag_v = 0
            v_s = pos_v[0][0] - 3
            v_e = pos_v[0][1] + 3
            # clamp to the image bounds
            v_s = v_s if v_s > 0 else 0
            v_e = v_e if v_e < height else height - 1
            # crop and save each character image
            for i in range(len(pos_h)):
                h_s = pos_h[i][0] - 2
                h_e = pos_h[i][1] + 2
                h_s = h_s if h_s > 0 else 0
                h_e = h_e if h_e < width else width - 1
                part_card = img[v_s: v_e, h_s: h_e]
                # part_card_mask = use_mask(part_card)
                # # 对遮罩进行一次很小的腐蚀膨胀，去掉细微干扰的噪点
                # knl = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
                # part_card_mask = cv2.erode(part_card_mask, knl, iterations=1)
                # part_card_mask = cv2.dilate(part_card_mask, knl, iterations=3)
                # cv2.imshow("part_card_mask", part_card_mask)
                #
                # contours = cv2.findContours(part_card_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
                # area_old = 0  # 前一个图的面积
                # part_card1 = None
                # for j in range(len(contours)):
                #     cnt = contours[j]
                #     area = cv2.contourArea(cnt)
                #     # 跳过面积小的部分
                #     if area < area_old:
                #         continue
                #     area_old = area
                #     box2 = np.int0(cv2.boxPoints(cv2.minAreaRect(cnt)))
                #
                #     # 将所有可能的位置都旋转为水平方向，并拉伸为具有相同宽高的图形
                #     b1 = box2[1] - box2[0]
                #     b2 = box2[1] - box2[2]
                #     if abs(b1[0]) + abs(b1[1]) >= abs(b2[0]) + abs(b2[1]):
                #         f = np.float32([box2[1], box2[2], box2[0], box2[3]])
                #     else:
                #         f = np.float32([box2[2], box2[3], box2[1], box2[0]])
                #     # 取出可能字符的位置
                #     t = np.float32([[0, 0], [40, 0], [0, 40], [40, 40]])
                #     mtx = cv2.getPerspectiveTransform(f, t)
                #     # 在原图中取出对应位置的字符图片， 并将大小矫正为40*40
                #     part_card1 = cv2.warpPerspective(part_card, mtx, (40, 40))
                split_result_path = "imgs/wordsplit/split_result/each_word" + str(random.randint(0, 300)) + ".jpg"
                # if part_card1 is None:
                #     cv2.imshow("part_card", part_card)
                cv2.imwrite("./static/" + split_result_path, part_card)
                # else:
                #     cv2.imshow("part_card", part_card1)
                #     cv2.imwrite("./static/" + split_result_path, part_card1)
                # cv2.waitKey(0)
                # NOTE(review): this second imwrite duplicates the one above
                # (same path, same image) and looks redundant.
                cv2.imwrite("./static/" + split_result_path, part_card)
                self.split_result.append(split_result_path)
        return split_process_img, self.split_result

    # Character recognition.
    def predict(self):
        # NOTE(review): returns the bool False on failure but a 2-tuple on
        # success — callers must check the type before unpacking.
        svm = train2use_SVM()
        # paths of the character images actually used for prediction
        predict_img = []
        # successfully recognised plate characters
        predict_result = []
        # fewer than 7 characters means character segmentation failed
        if len(self.split_result) < 7:
            return False
        print("len(self.split_result)=", len(self.split_result))
        for i, part_card_path in enumerate(self.split_result):
            part_card = cv2.imread("./static/" + part_card_path, cv2.IMREAD_GRAYSCALE)

            # continue
            part_card_final = cv2.threshold(part_card, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

            part_card_origin = part_card_final.copy()
            w = abs(part_card_final.shape[1] - SZ) // 2 # horizontal padding that centres the glyph before resizing to SZ x SZ

            part_card_final = cv2.copyMakeBorder(part_card_final, 0, 0, w, w, cv2.BORDER_CONSTANT, value=[0, 0, 0])

            part_card_final = cv2.resize(part_card_final, (SZ, SZ), interpolation=cv2.INTER_AREA)
            part_card_final = preprocess_hog([part_card_final])
            if i == 0:  # first character is the Chinese province abbreviation
                resp = svm.modelchinese.predict(part_card_final)
                charactor = provinces[int(resp[0]) - PROVINCE_START]
            else:  # latin letter or digit
                resp = svm.model.predict(part_card_final)
                charactor = chr(resp[0])
            # a trailing "1" is assumed to be the plate's edge, not a character
            if charactor == "1" and i > 6:
                # if part_card_origin.shape[0] / part_card_origin.shape[1] >= 7:  # a "1" this thin is treated as the plate edge
                continue
            # save the images used for recognition
            part_card1 = cv2.resize(part_card_origin, (50, 50), interpolation=cv2.INTER_AREA)
            predict_image = "imgs/wordsplit/predict/word_predict" + str(random.randint(0, 300)) + ".jpg"
            cv2.imwrite("./static/" + predict_image, part_card1)
            predict_img.append(predict_image)
            # record the recognised character
            predict_result.append(charactor)
        return predict_img, predict_result


# 测试语句要将路径中的 . 换成 ..才可以访问到图片
# pp = operate_img(["./static/imgs/3.jpg"], load_which='wordsplit')
# pp.word_split()
# print(pp.predict())