import cv2
import xlwt
import glob
import time
import difflib
import numpy as np
import ocr.params as params
import ocr.segment_process as osp
import ocr.post_tx as opt
import matplotlib.pyplot as plt
from ocr.clf_model import *
from ocr.db_model import *
from ocr.crnn_model import *
import ocr.pp_rec as opr
from ocr.crnn_nets.crnn_onnx import get_rotate_crop_image
from utils import utility


def convert_row_and_col(image, nrow_map, ncol_map, row_map, col_map):
    """
    Upscale the 256x256 row/column segmentation masks to the original image
    size and re-binarize each one to {0, 255}.

    :param image: raw image (24-bit BGR, e.g. decoded from a ".jpg")
    :param nrow_map: 8-bit invisible-row mask, values ~0 or ~255
    :param ncol_map: 8-bit invisible-column mask, values ~0 or ~255
    :param row_map: 8-bit visible-row mask, values ~0 or ~255
    :param col_map: 8-bit visible-column mask, values ~0 or ~255
    :return: (image, score_nrow, score_ncol, score_row, score_col) where each
             score_* is a uint8 mask at the original image size holding only 0/255
    """
    scale_image = image
    h, w, _ = scale_image.shape

    def _resize_and_binarize(mask):
        # Resize to the original image size, then hard-threshold at 128.
        # Interpolation introduces intermediate gray values, so the mask must
        # be re-binarized after resizing. The two chained np.where calls of
        # the original code collapse to a single ">128 -> 255 else 0" step.
        resized = cv2.resize(np.asarray(mask).astype(np.uint8), (w, h),
                             interpolation=cv2.INTER_AREA)
        return np.where(resized > 128, 255, 0).astype(np.uint8)

    score_nrow = _resize_and_binarize(nrow_map)
    score_ncol = _resize_and_binarize(ncol_map)
    score_row = _resize_and_binarize(row_map)
    score_col = _resize_and_binarize(col_map)

    return scale_image, score_nrow, score_ncol, score_row, score_col


def find_row_straight_line(scale_image, score_nrow, score_row):
    """
    得到可见行与不可见行线的表示结果
    :param scale_image: 原始图像
    :param score_nrow: 不可见行图 0 or 255
    :param score_row:可见行图 0 or 255
    :return: 每条行线斜率k和截距b组成的列表
    """
    rmap = cv2.bitwise_and(score_nrow, score_row)
    h, w = rmap.shape
    # 查找图像上的轮廓
    row_contours, _ = cv2.findContours(rmap, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # print(f'一共有{len(row_contours)}行轮廓')

    sort_row_contours = sorted(row_contours, key=lambda x: min(x[:, :, 1]))

    row_kbs = []  # 存储最终行线表示结果的变量
    row_rect_contours_kbs = []
    for row_contour in sort_row_contours[1:]:

        # 2021/9/2 Add: 过滤轮廓面积过小的值
        retval = cv2.contourArea(row_contour)
        if retval < int(0.001 * h * w):
            continue

        rect = cv2.minAreaRect(row_contour)  # rect: ((47.5, 435.5), (7.0, 5.0), 0.0)
        box_points = cv2.boxPoints(rect)
        draw_box_points = box_points[:, np.newaxis, :]
        draw_box_points = draw_box_points.astype(np.int32)
        # cv2.drawContours(scale_image, [draw_box_points], -1, (0, 0, 255), 3)
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        # cv2.imshow('image', scale_image)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        '''把矩形等效成一条直线，计算k，b
        由于是行的角度计算，范围应该在0度左右,应该使用最小外接矩形计算角度'''
        angle = rect[2]
        if 80 < angle < 100:
            angle = 90 + rect[2]
        rad = angle * np.pi / 180
        k = np.tan(rad)
        b = rect[0][1] - k * rect[0][0]
        # cv2.line(scale_image, (np.int(rect[0][0]), np.int(rect[0][1])),
        #          (np.int(rect[0][0]+1000*np.cos(rad)), np.int(rect[0][1]+1000*np.sin(rad))), color=(0,0,255), thickness=3)
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        # cv2.imshow('image', scale_image)
        # cv2.waitKey(0)
        row_rect_contours_kbs.append([draw_box_points, (k, b)])
        # row_kbs.append((k, b))

    # 按照矩形坐标中的y值将其进行排序
    row_rect_contours_kbs.sort(key=lambda x: np.min(x[0][:, :, 1]))

    for row_rect_contours_kb in row_rect_contours_kbs:
        kb = row_rect_contours_kb[1]
        row_kbs.append(kb)

    # 依次输出各行，查看结果
    # 按照检测到的轮廓y值进行排序
    # for row_rect_contour in row_rect_contours:
    #     cv2.drawContours(scale_image,[row_rect_contour],-1,(0,0,255),3)
    #     cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    #     cv2.imshow('image', scale_image)
    #     cv2.waitKey(0)

    return row_kbs


def find_col_straight_line(scale_image, score_ncol, score_col):
    """
    得到可见列与不可见列线的表示结果，当然也许不存在可见列
    :param scale_image: 原始图像
    :param score_ncol: 不可见列图 0 or 255
    :param score_col: 可见列图 0 or 255
    :return: 每条列线斜率k和截距b组成的列表
    """
    # 将图像中检测到的列合在一起
    cmap = cv2.bitwise_and(score_ncol, score_col)
    h, w = cmap.shape

    # 将轮廓点得到最小外接矩形（四点坐标）
    col_contours, _ = cv2.findContours(cmap, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(scale_image, col_contours, -1, (0, 0, 255), 3)
    # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    # cv2.imshow('image', scale_image)
    # cv2.waitKey(0)

    sort_col_contours = sorted(col_contours, key=lambda x: min(x[:, :, 0]))

    col_kbs = []
    col_rect_contours_kbs = []
    for col_contour in sort_col_contours[1:]:

        # 2021/9/2 Add: 求轮廓面积值
        retval = cv2.contourArea(col_contour)
        if retval < int(0.001 * h * w):
            continue

        # print(type(col_contour))
        # print(col_contour.dtype)
        # col_rect_contours.append(col_contour)
        rect = cv2.minAreaRect(col_contour)
        # print('rect:', rect)
        box_points = cv2.boxPoints(rect)
        # print('box points:', box_points)
        # print(type(box_points))
        # print(box_points.shape)
        draw_box_points = box_points[:, np.newaxis, :]
        draw_box_points = draw_box_points.astype(np.int32)
        # cv2.drawContours(scale_image, [draw_box_points], -1, (0, 0, 255), 3)
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        # cv2.imshow('image', scale_image)
        # cv2.waitKey(0)

        '''把矩形等效成一条直线，计算k，b
        由于是列的角度计算，范围应该在90度左右,应该使用最小外接矩形计算角度'''
        angle = rect[2]
        if -10 < angle < 10:
            angle = 90 + rect[2]
        # print('angle:', angle)
        # print('angle:', angle)
        rad = angle * np.pi / 180
        k = np.tan(rad)
        b = rect[0][1] - k * rect[0][0]
        # print('k:{},b:{}'.format(k, b))
        # cv2.line(scale_image, (np.int(rect[0][0]), np.int(rect[0][1])),
        #          (np.int(rect[0][0] + 1000 * np.cos(rad)), np.int(rect[0][1] + 1000 * np.sin(rad))), color=(0, 0, 255),
        #          thickness=3)
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        # cv2.imshow('image', scale_image)
        # cv2.waitKey(0)
        col_rect_contours_kbs.append([draw_box_points, (k, b)])
        # col_kbs.append((k,b))

    # 按照矩形坐标中的y值将其进行排序
    col_rect_contours_kbs.sort(key=lambda x: np.min(x[0][:, :, 0]))

    # 按照坐标x值对k，b值进行排序
    for col_rect_contours_kb in col_rect_contours_kbs:
        kb = col_rect_contours_kb[1]
        col_kbs.append(kb)
    # print('col rect contours:', col_rect_contours)
    # print('-'*20)

    # 依次输出各行，查看结果
    # 按照检测到的轮廓y值进行排序
    # for col_rect_contour in col_rect_contours:
    #     cv2.drawContours(scale_image, [col_rect_contour], -1, (0, 0, 255), 3)
    #     cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    #     cv2.imshow('image', scale_image)
    #     cv2.waitKey(0)
    return col_kbs


def recognize_key_content_col_straight_line(sorted_rect_row_kbs, sorted_rect_col_kbs, scale_image):
    """
    Find the column indices holding the key headers '项目名称' (item name),
    '检查结果' (result) and '单位' (unit), plus the data-row indices.

    Every cell quad formed by adjacent row/column lines is cropped, OCR'd with
    ppocr, and fuzzy-matched against the three header strings.

    :param sorted_rect_row_kbs: (k, b) of each row line, sorted top-to-bottom
    :param sorted_rect_col_kbs: (k, b) of each column line, sorted left-to-right
    :param scale_image: original image (do NOT draw on it — later recognition reads it)
    :return: (row_flags, col_flags) — data-row indices (header rows removed)
             and key-column indices (may contain duplicates; callers dedupe)
    """
    from PIL import Image  # hoisted out of the inner loop (was re-imported per cell)

    index = 0
    col_flags = []  # key-column indices
    row_flags = list(range(len(sorted_rect_row_kbs) - 1))  # data rows; header rows removed below
    for i in range(len(sorted_rect_row_kbs) - 1):
        row_1_kb = sorted_rect_row_kbs[i]
        row_2_kb = sorted_rect_row_kbs[i + 1]
        for j in range(len(sorted_rect_col_kbs) - 1):
            index += 1
            col_1_kb = sorted_rect_col_kbs[j]
            col_2_kb = sorted_rect_col_kbs[j + 1]

            # Intersect row line y = kr*x + br with column line y = kc*x + bc:
            # x = (bc - br) / (kr - kc), then y from the row line.
            left_top_x = (col_1_kb[1] - row_1_kb[1]) / (row_1_kb[0] - col_1_kb[0])
            left_top_y = row_1_kb[0] * left_top_x + row_1_kb[1]
            right_top_x = (col_2_kb[1] - row_1_kb[1]) / (row_1_kb[0] - col_2_kb[0])
            right_top_y = row_1_kb[0] * right_top_x + row_1_kb[1]
            left_bottom_x = (col_1_kb[1] - row_2_kb[1]) / (row_2_kb[0] - col_1_kb[0])
            left_bottom_y = row_2_kb[0] * left_bottom_x + row_2_kb[1]
            right_bottom_x = (col_2_kb[1] - row_2_kb[1]) / (row_2_kb[0] - col_2_kb[0])
            right_bottom_y = row_2_kb[0] * right_bottom_x + row_2_kb[1]
            points = np.array([[left_top_x, left_top_y], [right_top_x, right_top_y],
                               [right_bottom_x, right_bottom_y], [left_bottom_x, left_bottom_y]])

            ''' 2022/1/5 add: filter tiny and degenerate quads '''
            # Bug fix: np.int was removed in NumPy 1.24 — use the builtin int.
            if int(points[0, 1]) == int(points[1, 1]) == int(points[2, 1]) == int(points[3, 1]):
                continue
            x_min = min(left_top_x, right_top_x, left_bottom_x, right_bottom_x)
            x_max = max(left_top_x, right_top_x, left_bottom_x, right_bottom_x)
            y_min = min(left_top_y, right_top_y, left_bottom_y, right_bottom_y)
            y_max = max(left_top_y, right_top_y, left_bottom_y, right_bottom_y)
            # Skip quads whose bounding box is smaller than 50 px^2
            if (x_max - x_min) * (y_max - y_min) < 50:
                continue
            # Skip quads with any negative coordinate
            if (points < 0).any():
                continue

            ''' 2022-03-03: ppocr recognition (earlier crnn / paddle-hub HTTP
            variants were removed from the commented-out history) '''
            # Crop the (possibly rotated) cell to a small image, save it, and OCR it.
            dst_dir = params.mistake_small_image_path + "crnn_part_img_rotated_{}.jpg".format(index)
            partImg_array = get_rotate_crop_image(scale_image, points.astype(np.float32))
            partImg = Image.fromarray(partImg_array).convert("RGB")
            partImg.save(dst_dir)
            pred_result = opr.main(dst_dir)

            # Skip cells the recognizer could not read
            if pred_result is None:
                continue

            # Keep columns fuzzy-matching the three headers; a '项目名称' hit
            # marks row i as a header row, so it is removed from the data rows.
            if difflib.SequenceMatcher(None, '项目名称', pred_result).ratio() >= 0.5:
                col_flags.append(j)
                # Guard: a second header hit in the same row would otherwise
                # raise ValueError on the repeated remove().
                if i in row_flags:
                    row_flags.remove(i)
            elif difflib.SequenceMatcher(None, '检查结果', pred_result).ratio() >= 0.5:
                col_flags.append(j)
            elif difflib.SequenceMatcher(None, '单位', pred_result).ratio() >= 0.5:
                col_flags.append(j)

    return row_flags, col_flags


def get_proper_result_straight_line(sorted_straight_row_line, sorted_straight_col_line, row_flags, col_flags, scale_image):
    """
    OCR every data cell of the key columns ('项目名称', '检查结果', '单位') and
    package the text plus its quad corners for the front end.

    :param sorted_straight_row_line: (k, b) of each row line, sorted top-to-bottom
    :param sorted_straight_col_line: (k, b) of each column line, sorted left-to-right
    :param row_flags: indices of the data rows (header rows already removed)
    :param col_flags: indices of the key columns (may contain duplicates)
    :param scale_image: original image
    :return: list of dicts {'projectName': text, 'points': [corner dicts]}
    """
    from PIL import Image  # hoisted out of the loops (was re-imported per cell)

    index = 0
    output_result = []  # front-end payload

    print('row flags:', set(row_flags))
    print('col flags:', set(col_flags))
    # Deduplicate and sort so cells are visited top-to-bottom, left-to-right
    # (plain list(set(...)) does not guarantee any ordering).
    col_flags = sorted(set(col_flags))
    row_flags = sorted(set(row_flags))

    # Bug fix: the old code wrapped everything in
    # `for num in range(len(col_flags) // 3)` while still iterating ALL of
    # col_flags inside (the panel slice was commented out), so pages with more
    # than one 3-column panel emitted every cell multiple times, and pages
    # with fewer than 3 recognized key columns emitted nothing at all.
    # A single pass over the cells is correct.
    for i in row_flags:
        row_1_kb = sorted_straight_row_line[i]
        row_2_kb = sorted_straight_row_line[i + 1]
        for j in col_flags:
            index += 1
            col_1_kb = sorted_straight_col_line[j]
            col_2_kb = sorted_straight_col_line[j + 1]

            # Intersect row line y = kr*x + br with column line y = kc*x + bc:
            # x = (bc - br) / (kr - kc), then y from the row line.
            left_top_x = (col_1_kb[1] - row_1_kb[1]) / (row_1_kb[0] - col_1_kb[0])
            left_top_y = row_1_kb[0] * left_top_x + row_1_kb[1]
            right_top_x = (col_2_kb[1] - row_1_kb[1]) / (row_1_kb[0] - col_2_kb[0])
            right_top_y = row_1_kb[0] * right_top_x + row_1_kb[1]
            left_bottom_x = (col_1_kb[1] - row_2_kb[1]) / (row_2_kb[0] - col_1_kb[0])
            left_bottom_y = row_2_kb[0] * left_bottom_x + row_2_kb[1]
            right_bottom_x = (col_2_kb[1] - row_2_kb[1]) / (row_2_kb[0] - col_2_kb[0])
            right_bottom_y = row_2_kb[0] * right_bottom_x + row_2_kb[1]
            points = np.array([[left_top_x, left_top_y], [right_top_x, right_top_y],
                               [right_bottom_x, right_bottom_y], [left_bottom_x, left_bottom_y]])

            ''' 2022-03-03: ppocr recognition (earlier crnn / paddle-hub HTTP
            variants were removed from the commented-out history) '''
            # Crop the (possibly rotated) cell to a small image, save it, and OCR it.
            dst_dir = params.mistake_small_image_path + "crnn_part_img_rotated_{}.jpg".format(index)
            partImg_array = get_rotate_crop_image(scale_image, points.astype(np.float32))
            partImg = Image.fromarray(partImg_array).convert("RGB")
            partImg.save(dst_dir)
            pred_result = opr.main(dst_dir)

            # Skip cells the recognizer could not read
            if pred_result is None:
                continue

            # Front-end payload: recognized text plus the four quad corners
            points_list = [
                {'location': 'lt', 'x': int(left_top_x), 'y': int(left_top_y)},
                {'location': 'rt', 'x': int(right_top_x), 'y': int(right_top_y)},
                {'location': 'rb', 'x': int(right_bottom_x), 'y': int(right_bottom_y)},
                {'location': 'lb', 'x': int(left_bottom_x), 'y': int(left_bottom_y)},
            ]
            output_result.append({'projectName': pred_result, 'points': points_list})

    return output_result


# def main_tencent(obj=None):
#     import cv2
#     import json
#     import xlwt
#     import base64
#     import difflib
#     from tencentcloud.common import credential
#     from tencentcloud.common.profile.client_profile import ClientProfile
#     from tencentcloud.common.profile.http_profile import HttpProfile
#     from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
#     from tencentcloud.ocr.v20181119 import ocr_client, models
#     try:
#         image = cv2.imdecode(np.array(bytearray(obj), dtype='uint8'), cv2.IMREAD_UNCHANGED)
#
#         # 2021/9/2 Add: 处理灰度图像
#         if len(image.shape) == 2:
#             image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
#         # 2021/11/1 Add: 处理32位4通道图像
#         if image.shape[-1] == 4:
#             image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
#         # image_np = cv2.imread("F:/laibo/Data/tijianbaogao/raw/z_hanglie/img_json/3.jpg")
#         image = cv2.imencode('.jpg', image)[1]
#         image_code = str(base64.b64encode(image))[2:-1]
#         cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")  # SECURITY: real Tencent Cloud credentials were committed here — revoke them and load from env vars / a config store instead
#         httpProfile = HttpProfile()
#         httpProfile.endpoint = "ocr.tencentcloudapi.com"
#
#         clientProfile = ClientProfile()
#         clientProfile.httpProfile = httpProfile
#         client = ocr_client.OcrClient(cred, "ap-shanghai", clientProfile)
#
#         req = models.RecognizeTableOCRRequest()
#         params = {
#             "ImageBase64": image_code,
#             # "ImageUrl": "https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/u=1098397867,2553347606&fm=11&gp=0.jpg"
#         }
#         req.from_json_string(json.dumps(params))
#
#         resp = client.RecognizeTableOCR(req)
#         print(resp.to_json_string())
#         resp = json.loads(resp.to_json_string())
#
#     except TencentCloudSDKException as err:
#         print(err)
#
#     table_detections = resp['TableDetections']
#
#     row_col = {}
#     key_col = 0
#     workbook = xlwt.Workbook(encoding='utf-8')
#     worksheet = workbook.add_sheet('My Worksheet')
#     for cells in table_detections:
#         cell = cells['Cells']
#         for content in cell:
#             if content['Type'] != 'footer' and content['Type'] != 'header':
#                 # worksheet.write(content['RowTl'], content['ColTl'], label=content['Text'])
#                 if difflib.SequenceMatcher(None, content['Text'], '项目名称').ratio() > 0.5:
#                     key_col = int(content['ColTl'])
#     temp_line = []
#     for cells in table_detections:
#         cell = cells['Cells']
#         for content in cell:
#             print(content['ColTl'])
#             print(type(content['ColTl']))
#             if int(content['ColTl']) == key_col:
#                 temp_line.append(content)
#
#     return '', temp_line


def main(obj=None):
    """
    Entry point: decode an uploaded image, classify the report type, and — for
    'project' type reports — run row/column segmentation plus per-cell OCR.

    :param obj: raw image bytes (e.g. the content of a .jpg file)
    :return: (report_type, result) — result is the per-cell payload for
             'project' images, '' otherwise; ('', '') on any failure
    """
    import traceback  # local import so the file's top-level imports stay untouched
    try:
        image = cv2.imdecode(np.array(bytearray(obj), dtype='uint8'), cv2.IMREAD_UNCHANGED)

        '''Normalize every incoming image type to 3-channel BGR'''
        # 2021/9/2 Add: grayscale input
        if len(image.shape) == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        # 2021/11/1 Add: 32-bit 4-channel (BGRA) input
        if image.shape[-1] == 4:
            image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

        '''googlenet + tableImageParser + crnn pipeline'''
        start = time.time()

        '''1. Report-type classification'''
        # Renamed from `type` to avoid shadowing the builtin.
        report_type = clf_predict(image)
        print('至分类模型调用用时--', time.time() - start)

        '''2. Row/column segmentation and recognition'''
        if report_type == 'project':
            # nrow_map, ncol_map, row_map, col_map = osp.generate_map(image, params.segment_model_path)
            nrow_map, ncol_map, row_map, col_map = osp.pbInference(image, params.segment_pb_model_path)
            print('至分割模型调用用时--', time.time() - start)

            score_nrow, score_ncol, score_row, score_col = opt.post_tx(nrow_map, ncol_map, row_map, col_map)
            scale_image, score_nrow, score_ncol, score_row, score_col = convert_row_and_col(image, score_nrow,
                                                                                              score_ncol, score_row,
                                                                                              score_col)
            print('至分割图后处理调用用时--', time.time() - start)

            row_straight_line = find_row_straight_line(image, score_nrow, score_row)
            col_straight_line = find_col_straight_line(image, score_ncol, score_col)
            print('至分割图行列提取调用用时--', time.time() - start)

            row_flags, col_flags = recognize_key_content_col_straight_line(row_straight_line, col_straight_line, scale_image)
            print('得到所需要列的序号用时--', time.time() - start)

            proper_result = get_proper_result_straight_line(row_straight_line, col_straight_line, row_flags, col_flags,
                                                            scale_image)
            print('至逐文本块识别调用用时--', time.time() - start)
        else:
            proper_result = ''
        return report_type, proper_result
    except Exception as e:
        # Top-level boundary: report the failure (with traceback for
        # debuggability) and fall back to an empty result.
        print(e)
        traceback.print_exc()
        return '', ''


if __name__ == '__main__':
    image_path = 'F:/laibo/Data/tijianbaogao/tip/raw/z_hanglie/img_json/3.jpg'
    # Use a context manager so the file handle is closed (the old code leaked it).
    with open(image_path, 'rb') as obj:
        main(obj.read())
