import cv2
import os
import sys
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Output alphabet of the character-recognition CNN: index i of the
# network's 67-way softmax output maps to char_table[i]
# (digits, then Latin letters, then Chinese province abbreviations).
char_table = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
              'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '川', '鄂', '赣', '甘', '贵',
              '桂', '黑', '沪', '冀', '津', '京', '吉', '辽', '鲁', '蒙', '闽', '宁', '青', '琼', '陕', '苏', '晋',
              '皖', '湘', '新', '豫', '渝', '粤', '云', '藏', '浙']

def hist_image(img):
    """Histogram-equalize a grayscale image in place and return it.

    Builds the 256-bin gray-level histogram, converts it to a cumulative
    distribution (CDF), then remaps every pixel value v to cdf[v] * 255.
    The input array is modified in place; storing back into the uint8
    array truncates exactly like the original per-pixel loop did.
    """
    assert img.ndim == 2  # grayscale (2-D) images only
    img_h, img_w = img.shape[0], img.shape[1]

    # Histogram of the 256 gray levels, computed in C instead of two
    # nested Python loops, then normalized cumulative distribution.
    hist = np.bincount(img.ravel(), minlength=256)
    p1 = np.cumsum(hist / (img_w * img_h))

    # Remap every pixel through the CDF as a vectorized lookup table.
    img[:, :] = p1[img] * 255
    return img

# Locate the plate area inside a binary image using row/column projections.
def find_board_area(img):
    """Scan a binary (0/255) image for the plate band via projections.

    Returns (left, top, width, height) where width is hard-coded to 120
    and height is bottom - top - 10.
    NOTE(review): `flag` keeps whatever value the row scan left it with
    when the column scan starts, and `v_proj[col - 1]` indexes -1 when
    col == 0 on very narrow images — quirks preserved as found.
    """
    assert img.ndim == 2
    img_h,img_w = img.shape[0],img.shape[1]

    # Default plate bounds: the whole image.
    top,bottom,left,right = 0,img_h,0,img_w
    # True while the row scan is inside a candidate horizontal band.
    flag = False
    # Per-row / per-column white-pixel counters.
    h_proj = [0 for i in range(img_h)]
    v_proj = [0 for i in range(img_w)]

    # Scan rows from 50% to 80% of the image height, stepping 3 rows.
    for row in range(round(img_h * 0.5),round(img_h * 0.8),3):
        for col in range(img_w):        # count white pixels in this row
            if img[row,col] == 255:
                h_proj[row] += 1

        if flag == False and h_proj[row] > 12:  # enough white pixels: band starts here
            flag = True
            top = row       # this row becomes the top edge
        if flag == True and row > top + 8 and h_proj[row] < 12:     # band ends; scanning continues, so the LAST band found wins
            bottom = row
            flag = False

    # Column scan from 5% of the width to find the plate's left edge.
    for col in range(round(img_w * 0.05),img_w,1):
        for row in range(top,bottom,1):     # only count inside the detected band
            if img[row,col] == 255:
                v_proj[col] += 1

        # Left edge: enough white pixels, or a sharp jump vs the previous column.
        if flag == False and (v_proj[col] > 10 or v_proj[col] - v_proj[col - 1] > 5):
            left = col
            break
    return left,top,120,bottom - top - 10       # fixed width 120; height shrunk by 10

# Geometry gate for candidate plate rectangles.
def verify_scale(rotate_rect):
    """Check whether a rotated rect is plausibly a licence plate.

    rotate_rect follows OpenCV's RotatedRect convention:
    (center (x, y), (width, height), angle), angle in (-90, 0],
    measured from the x axis to the first edge hit rotating CCW.
    Accepts the rect only when its area, aspect ratio and tilt all
    fall inside plate-like bounds.
    """
    error = 0.4
    aspect = 4   # nominal plate aspect ratio (real plates are ~4.7272)
    theta = 30   # maximum accepted tilt, in degrees

    width, height = rotate_rect[1][0], rotate_rect[1][1]
    angle = rotate_rect[2]

    # A degenerate rect (zero width or height) can never be a plate.
    if width == 0 or height == 0:
        return False

    # Area window: 10*(10*aspect) .. 150*(150*aspect) pixels.
    area = width * height
    if not (10 * (10 * aspect) < area < 150 * (150 * aspect)):
        return False

    # Orientation-independent aspect ratio, within aspect*(1 +/- error).
    ratio = width / height
    ratio = max(ratio, 1 / ratio)
    if not (aspect * (1 - error) < ratio < aspect * (1 + error)):
        return False

    # Tilt check: near -90 when the short side is reported as "width",
    # near 0 when the long side is reported as "width".
    if width < height and -90 <= angle < -(90 - theta):
        return True
    if height < width and -theta < angle <= 0:
        return True
    return False

#形态（角度）修正
#传入填充掩膜后的最小矩形，原图
def img_Transform(car_rect,image):
    img_h,img_w = image.shape[:2]
    rect_w,rect_h = car_rect[1][0],car_rect[1][1]
    angle = car_rect[2]

    return_flag = False
    if car_rect[2] == 0:#旋转角度为0
        return_flag = True
    if car_rect[2] == -90 and rect_w < rect_h:#旋转角度=-90并且矩形的宽<高
        rect_w, rect_h = rect_h, rect_w
        return_flag = True
    if return_flag:
        car_img = image[int(car_rect[0][1] - rect_h / 2):int(car_rect[0][1] + rect_h / 2),
                  int(car_rect[0][0] - rect_w / 2):int(car_rect[0][0] + rect_w / 2)]
        return car_img

    car_rect = (car_rect[0],(rect_w,rect_h),angle)
    box = cv2.boxPoints(car_rect)   #最小外接矩形顶点坐标

    heigth_point = right_point = [0,0]
    left_point = low_point = [car_rect[0][0], car_rect[0][1]]#矩形中心点坐标(x,y)
    for point in box:
        if left_point[0] > point[0]:
            left_point = point
        if low_point[1] > point[1]:
            low_point = point
        if heigth_point[1] < point[1]:
            heigth_point = point
        if right_point[0] < point[0]:
            right_point = point

    if left_point[1] <= right_point[1]:  # 正角度
        new_right_point = [right_point[0], heigth_point[1]]
        pts1 = np.float32([left_point, heigth_point, right_point])
        pts2 = np.float32([left_point, heigth_point, new_right_point])  # 字符只是高度需要改变
        M = cv2.getAffineTransform(pts1, pts2)

        '''
        仿射变换，其实是将图形在2D平面内做变换，变换前后图片中原来平行的线仍会保持平行，可以想象是将长方形变换为平行四边形
        M=cv2.getAffineTransform(pos1,pos2),其中两个位置就是变换前后的对应位置关系。输出的就是仿射矩阵M,shape为[2,3]
        cv.getAffineTransform将创建一个2x3矩阵，该矩阵将传递给cv.warpAffine。
        '''

        dst = cv2.warpAffine(image, M, (round(img_w * 2), round(img_h * 2)))

        '''
        cv2.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) → dst
                       dsize为输出图像的大小;
                       flags表示插值方式，默认为 flags=cv2.INTER_LINEAR，表示线性插值，此外还有：cv2.INTER_NEAREST(最近邻插值)、cv2.INTER_AREA(区域插值)、cv2.INTER_CUBIC(三次样条插值)、cv2.INTER_LANCZOS4(Lanczos插值)
                       borderMode - 边界像素模式
                       borderValue - 边界填充值; 默认情况下，它为0
        round() 方法返回浮点数x的四舍五入值。round(x,n) 返回浮点数x的四舍五入的小数点后的n位数值
        '''

        car_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]

    elif left_point[1] > right_point[1]:  # 负角度
        new_left_point = [left_point[0], heigth_point[1]]
        pts1 = np.float32([left_point, heigth_point, right_point])
        pts2 = np.float32([new_left_point, heigth_point, right_point])  # 字符只是高度需要改变
        M = cv2.getAffineTransform(pts1, pts2)
        dst = cv2.warpAffine(image, M, (round(img_w * 2), round(img_h * 2)))
        car_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]

    return car_img

# Pre-processing: turn a BGR photo into a binary mask of plate candidates.
def pre_process(orig_img):
    """Produce a binary mask highlighting plate-like regions.

    Pipeline: gray -> 3x3 mean blur -> horizontal Sobel edges, gated by
    an HSV colour mask (yellow/green/blue plate hues), then Otsu
    thresholding and a wide rectangular closing that fuses the character
    strokes into one connected blob per plate.
    """
    gray = cv2.cvtColor(orig_img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.blur(gray, (3, 3))

    # x-derivative Sobel: character strokes produce strong vertical edges.
    edges = cv2.convertScaleAbs(cv2.Sobel(smoothed, cv2.CV_16S, 1, 0, ksize=3))

    # Colour gate: hue in the yellow/green band (22, 80) or the blue band
    # (100, 124), with sufficient saturation and brightness.
    hsv = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)
    hue, sat, val = hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2]
    colour_mask = ((((hue > 22) & (hue < 80)) | ((hue > 100) & (hue < 124)))
                   & (sat > 70) & (val > 70)).astype('float32')

    # Keep only the edges that sit on plate-coloured pixels.
    masked_edges = np.multiply(edges, colour_mask).astype(np.uint8)

    # Otsu picks the binarization threshold automatically.
    _, binary = cv2.threshold(masked_edges, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # Closing (dilate then erode) with a wide 21x5 kernel merges the
    # separate characters into a single plate-shaped region.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 5))
    return cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

# Flood-fill a candidate plate rect from random seeds: this both repairs
# contours distorted by the morphology step and rejects regions whose
# colour is not uniform enough to be a plate.
def verify_color(rotate_rect,src_image):
    """Flood-fill a candidate plate rect and re-check its geometry.

    Scatters random seed points inside `rotate_rect`, flood-fills (into a
    mask only, never the source image) from every seed whose HSV colour
    matches a plate background, then fits a min-area rect to the filled
    mask pixels.

    Returns (True, rect) when the filled rect still passes verify_scale,
    (False, rect) otherwise, and (False, False) when nothing was filled.
    """
    img_h,img_w = src_image.shape[:2]
    # floodFill requires the mask to be 2 pixels larger than the image.
    mask = np.zeros(shape=[img_h + 2,img_w + 2],dtype=np.uint8)
    connectivity = 8      # consider the 8-neighbourhood of each pixel
    loDiff,upDiff = 25,25 # allowed colour difference below/above the seed value
    new_value = 255       # value written into the mask for filled pixels
    # flags layout: bits 0-7 connectivity, bits 8-15 mask fill value,
    # bits 16+ behaviour switches.
    flags = connectivity
    flags |= cv2.FLOODFILL_FIXED_RANGE  # compare against the seed pixel, not each neighbour
    flags |= new_value << 8
    flags |= cv2.FLOODFILL_MASK_ONLY    # fill the mask, leave src_image untouched

    rand_seed_num = 10000 # candidate seed positions generated
    valid_seed_num = 400  # maximum number of seeds actually flood-filled
    adjust_param = 0.1
    box_points = cv2.boxPoints(rotate_rect)  # 4 corners of the rotated rect

    # Shrink the rect's axis-aligned "core" (between the two middle x and
    # y coordinates) by adjust_param so seeds stay away from the borders.
    box_points_x = [n[0] for n in box_points]
    box_points_x.sort(reverse=False)
    adjust_x = int((box_points_x[2] - box_points_x[1]) * adjust_param)
    col_range = [box_points_x[1] + adjust_x,box_points_x[2] - adjust_x]
    box_points_y = [n[1] for n in box_points]
    box_points_y.sort(reverse=False)
    adjust_y = int((box_points_y[2] - box_points_y[1]) * adjust_param)
    row_range = [box_points_y[1] + adjust_y, box_points_y[2] - adjust_y]

    # If that core is very thin (strongly rotated rect), place the seeds
    # along the rect's two diagonals instead.
    if (col_range[1] - col_range[0]) / (box_points_x[3] - box_points_x[0]) < 0.4 or (row_range[1] - row_range[0]) / (box_points_y[3] - box_points_y[0]) < 0.4:
        points_row = []
        points_col = []
        for i in range(2):
            pt1,pt2 = box_points[i],box_points[i + 2]  # opposite corner pair
            x_adjust,y_adjust = int(adjust_param * (abs(pt1[0] - pt2[0]))),int(adjust_param * (abs(pt1[1] - pt2[1])))
            # Pull both endpoints towards each other by 10% on each axis.
            if (pt1[0] <= pt2[0]):
                pt1[0], pt2[0] = pt1[0] + x_adjust, pt2[0] - x_adjust
            else:
                pt1[0], pt2[0] = pt1[0] - x_adjust, pt2[0] + x_adjust
            if (pt1[1] <= pt2[1]):
                # BUG FIX: this branch used the outer-scope `adjust_y`
                # instead of the per-diagonal `y_adjust` computed above.
                pt1[1], pt2[1] = pt1[1] + y_adjust, pt2[1] - y_adjust
            else:
                pt1[1], pt2[1] = pt1[1] - y_adjust, pt2[1] + y_adjust
            temp_list_x = [int(x) for x in np.linspace(pt1[0],pt2[0],int(rand_seed_num / 2))]
            temp_list_y = [int(y) for y in np.linspace(pt1[1],pt2[1],int(rand_seed_num / 2))]
            points_col.extend(temp_list_x)
            points_row.extend(temp_list_y)
    else:
        # Random rows inside the core, evenly spaced columns across it.
        points_row = np.random.randint(row_range[0],row_range[1],size=rand_seed_num)
        points_col = np.linspace(col_range[0],col_range[1],num=rand_seed_num).astype(np.int32)

    points_row = np.array(points_row)
    points_col = np.array(points_col)
    hsv_img = cv2.cvtColor(src_image, cv2.COLOR_BGR2HSV)
    h,s,v = hsv_img[:,:,0],hsv_img[:,:,1],hsv_img[:,:,2]

    # Flood-fill from randomly chosen seeds; ideally the whole plate
    # background ends up filled in the mask.
    flood_img = src_image.copy()  # debug visualisation of the used seeds
    seed_cnt = 0
    for i in range(rand_seed_num):
        rand_index = np.random.choice(rand_seed_num,1,replace=False)
        row,col = points_row[rand_index],points_col[rand_index]

        # Seeds produced by the diagonal scheme can fall outside the image.
        if(row[0]>=len(h)):
            continue
        if(col[0]>=len(h[0])):
            continue

        # Only seed from pixels that already have a plate background colour.
        if (((h[row,col] > 26) & (h[row,col] < 80)) | ((h[row,col] > 100) & (h[row,col] < 124))) & (s[row,col] > 70) & (v[row,col] > 70):
            cv2.floodFill(src_image, mask, (col,row), (255, 255, 255), (loDiff,) * 3, (upDiff,) * 3, flags)
            cv2.circle(flood_img,center=(col,row),radius=2,color=(0,0,255),thickness=2)
            seed_cnt += 1
            if seed_cnt >= valid_seed_num:
                break

    #====================== debug display ======================#
    show_seed = np.random.uniform(1,100,1).astype(np.uint16)
    cv2.imshow('floodfill' + str(show_seed),flood_img)
    cv2.imshow('flood_mask' + str(show_seed),mask)
    #====================== debug display ======================#

    # Collect every filled mask pixel (the mask has a 1-pixel border, hence
    # the offsets) and fit the minimum-area rect around them.
    mask_points = []
    for row in range(1,img_h + 1):
        for col in range(1,img_w + 1):
            if mask[row,col] != 0:
                mask_points.append((col - 1,row - 1))

    # Nothing was filled: no colour-consistent region here.
    if len(mask_points) == 0:
        return False,False

    mask_rotateRect = cv2.minAreaRect(np.array(mask_points,dtype=np.float32))
    if verify_scale(mask_rotateRect):
        return True,mask_rotateRect
    else:
        return False,mask_rotateRect

# Plate localization.
def locate_carPlate(orig_img,pred_image):
    """Find candidate plate images in orig_img.

    pred_image is the binary mask from pre_process(). Each external
    contour whose min-area rect passes verify_scale is refined with
    verify_color's flood-fill, deskewed by img_Transform and resized
    to 200x50.

    Returns a list of BGR plate crops (possibly empty). Shows debug
    windows and blocks on cv2.waitKey(0).
    """
    carPlate_list = []
    temp1_orig_img = orig_img.copy() # debug: contours + accepted boxes
    temp2_orig_img = orig_img.copy() # working copy for the colour checks
    contours,heriachy = cv2.findContours(pred_image,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    for i,contour in enumerate(contours):
        cv2.drawContours(temp1_orig_img, contours, i, (0, 255, 255), 2)
        # Minimum-area rotated rect around the contour.
        rotate_rect = cv2.minAreaRect(contour)

        # Geometry gate: area and aspect ratio must be plate-like.
        if verify_scale(rotate_rect):
            ret,rotate_rect2 = verify_color(rotate_rect,temp2_orig_img)
            if ret == False:
                continue
            # Deskew / crop the plate region from the image.
            car_plate = img_Transform(rotate_rect2, temp2_orig_img)
            # BUG FIX: the original tested len(car_plate[0]) == 0, which
            # raises IndexError on a zero-height crop; .size covers both
            # empty dimensions.
            if car_plate is None or car_plate.size == 0:
                continue

            # Fixed size for the later recognition stages.
            car_plate = cv2.resize(car_plate,(200,50))

            #======================== debug display ========================#
            box = cv2.boxPoints(rotate_rect2)
            for k in range(4):
                n1,n2 = k % 4,(k + 1) % 4
                # int() casts: newer OpenCV rejects float point coordinates.
                cv2.line(temp1_orig_img,(int(box[n1][0]),int(box[n1][1])),
                         (int(box[n2][0]),int(box[n2][1])),(255,0,0),2)
            cv2.imshow('opencv_' + str(i), car_plate)
            #======================== debug display ========================#
            carPlate_list.append(car_plate)

    cv2.imshow('contour', temp1_orig_img)

    cv2.waitKey(0)

    return carPlate_list

# Left-to-right character segmentation.
def horizontal_cut_chars(plate):
    """Cut a binary plate strip into per-character column spans.

    Scans columns left to right; a column with any foreground pixel
    (count above col_limit) belongs to a character. Each character
    region is delimited by the midpoints of the gaps on either side and
    is kept only when its width lies in (img_w/12, img_w/5).

    Returns a list of (area_left, area_right, char_width) tuples.
    """
    char_addr_list = []
    area_left,area_right,char_left,char_right = 0,0,0,0
    img_h,img_w = plate.shape[0],plate.shape[1]

    # Foreground-pixel count for each column, computed once up front
    # (the original recomputed every column twice via getColSum).
    # round(v / 255) maps 255 -> 1 and 0 -> 0.
    col_sums = [sum(round(plate[r,col] / 255) for r in range(img_h))
                for col in range(img_w)]

    # Threshold a column must exceed to count as part of a character.
    # Tuned to 0 (any foreground pixel) in the original.
    col_limit = 0 # round(0.5 * sum(col_sums) / img_w)
    # Accepted character-region width range.
    charWid_limit = [round(img_w / 12),round(img_w / 5)]
    is_char_flag = False

    for i in range(img_w):
        colValue = col_sums[i]
        if colValue > col_limit:
            if is_char_flag == False:
                # Entering a new character: close out the previous region
                # at the midpoint of the gap just crossed.
                area_right = round((i + char_right) / 2)
                area_width = area_right - area_left
                char_width = char_right - char_left
                if (area_width > charWid_limit[0]) and (area_width < charWid_limit[1]):
                    char_addr_list.append((area_left,area_right,char_width))
                char_left = i
                area_left = round((char_left + char_right) / 2)
                is_char_flag = True
        else:
            if is_char_flag == True:
                char_right = i - 1
                is_char_flag = False
    # Close the final character, which has no following gap.
    if area_right < char_left:
        area_right,char_right = img_w,img_w
        area_width = area_right - area_left
        char_width = char_right - char_left
        if (area_width > charWid_limit[0]) and (area_width < charWid_limit[1]):
            char_addr_list.append((area_left, area_right, char_width))
    return char_addr_list

def get_chars(car_plate):
    """Cut a binarized plate into resized 20x20 character images.

    Projects white pixels onto the Y axis to find the character band
    (the longest run of rows whose white-pixel ratio lies in
    [0.2, 0.8]), then splits that band into columns via
    horizontal_cut_chars(). Writes two debug images, shows every
    character window and blocks on cv2.waitKey(0) per character.
    Returns [] when no band covers at least half the plate height.
    """
    img_h,img_w = car_plate.shape[:2]
    h_proj_list = [] # (start_row, end_row) runs of the horizontal projection
    h_temp_len,v_temp_len = 0,0
    h_startIndex,h_end_index = 0,0 # row indices of the run being built
    h_proj_limit = [0.2,0.8] # rows with a white ratio outside this range are filtered out
    char_imgs = []

    # Project the binary plate onto the Y axis and collect the runs of
    # consecutive accepted rows (there may be more than one run).
    h_count = [0 for i in range(img_h)]
    for row in range(img_h):
        temp_cnt = 0
        for col in range(img_w):
            if car_plate[row,col] == 255:
                temp_cnt += 1
        h_count[row] = temp_cnt
        # Row rejected: too little or too much white; close any open run.
        if temp_cnt / img_w < h_proj_limit[0] or temp_cnt / img_w > h_proj_limit[1]:
            if h_temp_len != 0:
                h_end_index = row - 1
                h_proj_list.append((h_startIndex,h_end_index))
                h_temp_len = 0
            continue
        if temp_cnt > 0:
            if h_temp_len == 0:
                h_startIndex = row
                h_temp_len = 1
            else:
                h_temp_len += 1
        else:
            if h_temp_len > 0:
                h_end_index = row - 1
                h_proj_list.append((h_startIndex,h_end_index))
                h_temp_len = 0

    # Close the run still open at the bottom of the plate.
    if h_temp_len != 0:
        h_end_index = img_h - 1
        h_proj_list.append((h_startIndex, h_end_index))
    # Pick the longest run; it must span at least half the plate height.
    h_maxIndex,h_maxHeight = 0,0
    for i,(start,end) in enumerate(h_proj_list):
        if h_maxHeight < (end - start):
            h_maxHeight = (end - start)
            h_maxIndex = i
    if h_maxHeight / img_h < 0.5:
        return char_imgs
    chars_top,chars_bottom = h_proj_list[h_maxIndex][0],h_proj_list[h_maxIndex][1]

    plates = car_plate[chars_top:chars_bottom + 1,:]
    # Debug output: full plate and the cropped character band.
    cv2.imwrite('./carIdentityData/opencv_output/car.jpg',car_plate)
    cv2.imwrite('./carIdentityData/opencv_output/plate.jpg', plates)
    char_addr_list = horizontal_cut_chars(plates)

    for i,addr in enumerate(char_addr_list):
        # addr = (area_left, area_right, char_width); crop by area bounds.
        char_img = car_plate[chars_top:chars_bottom + 1,addr[0]:addr[1]]

        # Fixed CNN input size (hard-coded; was char_w, char_h in __main__).
        char_img = cv2.resize(char_img,(20,20))

        char_imgs.append(char_img)

        # Debug: show each extracted character (blocks until a key press).
        cv2.imshow(str(i),char_img)
        cv2.waitKey(0)

    return char_imgs

def extract_char(car_plate):
    """Binarize a BGR plate crop and split it into character images.

    Otsu-thresholds the grayscale plate, shows two debug windows, and
    delegates the actual segmentation to get_chars().
    """
    gray_plate = cv2.cvtColor(car_plate, cv2.COLOR_BGR2GRAY)
    # Otsu picks the binarization threshold automatically.
    ret, binary_plate = cv2.threshold(gray_plate, 0, 255,
                                      cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # Debug: the binarized plate before segmentation.
    cv2.imshow('extract_char_binary_plate', binary_plate)

    char_img_list = get_chars(binary_plate)

    # Debug: the plate again after segmentation ran.
    cv2.imshow('1', binary_plate)

    return char_img_list

#def cnn_select_carPlate(plate_list,model_path):
#    if len(plate_list) == 0:
#        return False,plate_list
#    g1 = tf.Graph()
#    sess1 = tf.compat.v1.Session(graph=g1)
#    with sess1.as_default():
#        with sess1.graph.as_default():
#            model_dir = os.path.dirname(model_path)
#            saver = tf.train.import_meta_graph(model_path)
#            saver.restore(sess1, tf.train.latest_checkpoint(model_dir))
#            graph = tf.get_default_graph()
#            net1_x_place = graph.get_tensor_by_name('x_place:0')
#            net1_keep_place = graph.get_tensor_by_name('keep_place:0')
#            net1_out = graph.get_tensor_by_name('out_put:0')

#            input_x = np.array(plate_list)
#            net_outs = tf.nn.softmax(net1_out)
#            preds = tf.argmax(net_outs,1) #预测结果
#            probs = tf.reduce_max(net_outs,reduction_indices=[1]) #结果概率值
#            pred_list,prob_list = sess1.run([preds,probs],feed_dict={net1_x_place:input_x,net1_keep_place:1.0})
#            # 选出概率最大的车牌
#            result_index,result_prob = -1,0.
#            for i,pred in enumerate(pred_list):
#                if pred == 1 and prob_list[i] > result_prob:
#                    result_index,result_prob = i,prob_list[i]
#            if result_index == -1:
#                return False,plate_list[0]
#            else:
#                return True,plate_list[result_index]

def cnn_recongnize_char(img_list,model_path):
    """Classify character images with the trained character CNN.

    img_list: list of preprocessed character images (the shape the
    checkpoint's 'x_place' input expects).
    model_path: path to the .meta graph file; weights are restored from
    the latest checkpoint in the same directory.

    Returns the predicted characters (looked up in char_table), one per
    input image; [] for an empty img_list.
    """
    text_list = []
    # Check before building anything: the original created a Session
    # first and leaked it (never closed) when img_list was empty.
    if len(img_list) == 0:
        return text_list

    g2 = tf.Graph()
    # Session as a context manager: installed as default for the body
    # and closed on exit, fixing the resource leak.
    with tf.Session(graph=g2) as sess2:
        with sess2.graph.as_default():
            model_dir = os.path.dirname(model_path)
            saver = tf.train.import_meta_graph(model_path)
            saver.restore(sess2, tf.train.latest_checkpoint(model_dir))
            graph = tf.get_default_graph()
            net2_x_place = graph.get_tensor_by_name('x_place:0')
            net2_keep_place = graph.get_tensor_by_name('keep_place:0')
            net2_out = graph.get_tensor_by_name('out_put:0')

            data = np.array(img_list)
            # Softmax over the 67-way output; argmax gives the index of
            # the most probable character for each image.
            net_out = tf.nn.softmax(net2_out)
            preds = tf.argmax(net_out,1)
            my_preds = sess2.run(preds, feed_dict={net2_x_place: data, net2_keep_place: 1.0})

            for i in my_preds:
                text_list.append(char_table[i])
            return text_list

if __name__ == '__main__':
    cur_dir = sys.path[0]
    # Target sizes for the recognition CNNs (kept for reference).
    car_plate_w,car_plate_h = 136,36
    char_w,char_h = 20,20
    plate_model_path = os.path.join(cur_dir, './carIdentityData/model/plate_recongnize/model.ckpt-1010.meta')
    char_model_path = os.path.join(cur_dir,'./carIdentityData/model/char_recongnize/model.ckpt-500.meta')

    # Single hard-coded test image (the original looped range(1) once).
    i = 222
    img = cv2.imread('./carIdentityData/images/' + str(i) + '.jpg')

    # Binary mask of plate-like regions.
    pred_img = pre_process(img)

    # Locate candidate plates in the original image.
    car_plate_list = locate_carPlate(img,pred_img)

    for car_plate in car_plate_list:
        # Split the plate into character images.
        char_img_list = extract_char(car_plate)

        # CNN character recognition.
        text = cnn_recongnize_char(char_img_list,char_model_path)
        print(text)

        cv2.waitKey(0)
