Upload 4 files
- app.py +172 -0
- requirements.txt +8 -0
- utils.py +226 -0
app.py
ADDED
@@ -0,0 +1,172 @@
import time

import cv2
import gradio as gr
from lineless_table_rec import LinelessTableRecognition
from paddleocr import PPStructure
from rapid_table import RapidTable
from rapidocr_onnxruntime import RapidOCR
from table_cls import TableCls
from wired_table_rec import WiredTableRecognition
from utils import plot_rec_box, LoadImage, format_html, box_4_2_poly_to_box_4_1

img_loader = LoadImage()
table_rec_path = "models/table_rec/ch_ppstructure_mobile_v2_SLANet.onnx"
det_model_dir = {
    "mobile_det": "models/ocr/ch_PP-OCRv4_det_infer.onnx",
    "server_det": "models/ocr/ch_PP-OCRv4_det_server_infer.onnx"
}

rec_model_dir = {
    "mobile_rec": "models/ocr/ch_PP-OCRv4_rec_infer.onnx",
    "server_rec": "models/ocr/ch_PP-OCRv4_rec_server_infer.onnx"
}
table_engine_list = [
    "auto",
    "rapid_table",
    "wired_table_v2",
    "pp_table",
    "wired_table_v1",
    "lineless_table"
]

# Example image paths
example_images = [
    "images/lineless1.png",
    "images/wired1.png",
    "images/lineless2.png",
    "images/wired2.png",
    "images/lineless3.jpg",
    "images/wired3.png",
]
rapid_table_engine = RapidTable(model_path=table_rec_path)
wired_table_engine_v1 = WiredTableRecognition(version="v1")
wired_table_engine_v2 = WiredTableRecognition(version="v2")
lineless_table_engine = LinelessTableRecognition()
table_cls = TableCls()
ocr_engine_dict = {}
pp_engine_dict = {}
for det_model in det_model_dir.keys():
    for rec_model in rec_model_dir.keys():
        det_model_path = det_model_dir[det_model]
        rec_model_path = rec_model_dir[rec_model]
        key = f"{det_model}_{rec_model}"
        ocr_engine_dict[key] = RapidOCR(det_model_path=det_model_path, rec_model_path=rec_model_path)
        pp_engine_dict[key] = PPStructure(
            layout=False,
            show_log=False,
            table=True,
            use_onnx=True,
            table_model_dir=table_rec_path,
            det_model_dir=det_model_path,
            rec_model_dir=rec_model_path
        )


def select_ocr_model(det_model, rec_model):
    return ocr_engine_dict[f"{det_model}_{rec_model}"]


def select_table_model(img, table_engine_type, det_model, rec_model):
    if table_engine_type == "rapid_table":
        return rapid_table_engine, 0
    elif table_engine_type == "wired_table_v1":
        return wired_table_engine_v1, 0
    elif table_engine_type == "wired_table_v2":
        print("Using v2 wired table")
        return wired_table_engine_v2, 0
    elif table_engine_type == "lineless_table":
        return lineless_table_engine, 0
    elif table_engine_type == "pp_table":
        return pp_engine_dict[f"{det_model}_{rec_model}"], 0
    elif table_engine_type == "auto":
        cls, elapse = table_cls(img)
        if cls == 'wired':
            table_engine = wired_table_engine_v2
        else:
            table_engine = lineless_table_engine
        return table_engine, elapse


def process_image(img, table_engine_type, det_model, rec_model):
    img = img_loader(img)
    start = time.time()
    table_engine, select_elapse = select_table_model(img, table_engine_type, det_model, rec_model)
    ocr_engine = select_ocr_model(det_model, rec_model)

    if isinstance(table_engine, PPStructure):
        result = table_engine(img, return_ocr_result_in_table=True)
        html = result[0]['res']['html']
        polygons = result[0]['res']['cell_bbox']
        polygons = [[polygon[0], polygon[1], polygon[4], polygon[5]] for polygon in polygons]
        ocr_boxes = result[0]['res']['boxes']
        all_elapse = f"- `table all cost: {time.time() - start:.5f}`"
    else:
        ocr_res, ocr_infer_elapse = ocr_engine(img)
        det_cost, cls_cost, rec_cost = ocr_infer_elapse
        ocr_boxes = [box_4_2_poly_to_box_4_1(ori_ocr[0]) for ori_ocr in ocr_res]

        if isinstance(table_engine, RapidTable):
            html, polygons, table_rec_elapse = table_engine(img, ocr_result=ocr_res)
        elif isinstance(table_engine, (WiredTableRecognition, LinelessTableRecognition)):
            html, table_rec_elapse, polygons, _, _ = table_engine(img, ocr_result=ocr_res)

        sum_elapse = time.time() - start
        all_elapse = f"- table all cost: {sum_elapse:.5f}\n - table rec cost: {table_rec_elapse:.5f}\n - ocr cost: {det_cost + cls_cost + rec_cost:.5f}"

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    table_boxes_img = plot_rec_box(img.copy(), polygons)
    ocr_boxes_img = plot_rec_box(img.copy(), ocr_boxes)
    complete_html = format_html(html)

    return complete_html, table_boxes_img, ocr_boxes_img, all_elapse


def main():
    det_models_labels = list(det_model_dir.keys())
    rec_models_labels = list(rec_model_dir.keys())

    with gr.Blocks() as demo:
        with gr.Row():  # two-column layout
            with gr.Column():  # left column
                img_input = gr.Image(label="Upload or Select Image", sources="upload")

                # Example image selector
                examples = gr.Examples(
                    examples=example_images,
                    inputs=img_input,
                    fn=lambda x: x,  # simply return the image path
                    outputs=img_input,
                    cache_examples=True
                )

                table_engine_type = gr.Dropdown(table_engine_list, label="Select Table Recognition Engine",
                                                value=table_engine_list[0])
                det_model = gr.Dropdown(det_models_labels, label="Select OCR Detection Model",
                                        value=det_models_labels[0])
                rec_model = gr.Dropdown(rec_models_labels, label="Select OCR Recognition Model",
                                        value=rec_models_labels[0])

                run_button = gr.Button("Run")
                gr.Markdown("# Elapsed Time")
                elapse_text = gr.Text(label="")  # show the timing string with `gr.Text`
            with gr.Column():  # right column
                # separate the outputs with Markdown headings
                gr.Markdown("# Html Render")
                html_output = gr.HTML(label="", elem_classes="scrollable-container")
                gr.Markdown("# Table Boxes")
                table_boxes_output = gr.Image(label="")
                gr.Markdown("# OCR Boxes")
                ocr_boxes_output = gr.Image(label="")

        run_button.click(
            fn=process_image,
            inputs=[img_input, table_engine_type, det_model, rec_model],
            outputs=[html_output, table_boxes_output, ocr_boxes_output, elapse_text]
        )

    demo.launch()


if __name__ == '__main__':
    main()
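The Gradio UI above is a thin wrapper around process_image. As a minimal sketch (not part of this commit), the pipeline can also be called directly, assuming the ONNX models under models/ and the example images under images/ referenced in app.py are present locally:

# Minimal sketch (not part of this commit): call the pipeline without the Gradio UI.
# Assumes the models/ and images/ directories referenced in app.py exist locally.
from app import process_image

html, table_boxes_img, ocr_boxes_img, elapse = process_image(
    "images/wired1.png",  # any input accepted by utils.LoadImage (path, bytes, ndarray, PIL)
    "auto",               # let TableCls choose wired vs. lineless table recognition
    "mobile_det",         # OCR detection model key from det_model_dir
    "mobile_rec",         # OCR recognition model key from rec_model_dir
)
print(elapse)  # timing summary string
with open("table.html", "w", encoding="utf-8") as f:
    f.write(html)  # standalone HTML rendering of the recognized table
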
requirements.txt
ADDED
@@ -0,0 +1,8 @@
wired_table_rec
lineless_table_rec
table_cls
rapid_table
paddleocr>=2.6.0.3
paddlepaddle
rapidocr_onnxruntime
gradio
utils.py
ADDED
@@ -0,0 +1,226 @@
from io import BytesIO
from pathlib import Path
from typing import Union, List

import numpy as np
import cv2
from PIL import UnidentifiedImageError, Image

InputType = Union[str, np.ndarray, bytes, Path, Image.Image]


class LoadImage:
    def __init__(self):
        pass

    def __call__(self, img: InputType) -> np.ndarray:
        if not isinstance(img, InputType.__args__):
            raise LoadImageError(
                f"The img type {type(img)} is not in {InputType.__args__}"
            )

        origin_img_type = type(img)
        img = self.load_img(img)
        img = self.convert_img(img, origin_img_type)
        return img

    def load_img(self, img: InputType) -> np.ndarray:
        if isinstance(img, (str, Path)):
            self.verify_exist(img)
            try:
                img = np.array(Image.open(img))
            except UnidentifiedImageError as e:
                raise LoadImageError(f"cannot identify image file {img}") from e
            return img

        if isinstance(img, bytes):
            img = np.array(Image.open(BytesIO(img)))
            return img

        if isinstance(img, BytesIO):
            img = np.array(Image.open(img))
            return img

        if isinstance(img, np.ndarray):
            return img

        if isinstance(img, Image.Image):
            return np.array(img)

        raise LoadImageError(f"{type(img)} is not supported!")

    def convert_img(self, img: np.ndarray, origin_img_type):
        if img.ndim == 2:
            return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        if img.ndim == 3:
            channel = img.shape[2]
            if channel == 1:
                return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

            if channel == 2:
                return self.cvt_two_to_three(img)

            if channel == 3:
                if issubclass(origin_img_type, (str, Path, bytes, Image.Image)):
                    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                return img

            if channel == 4:
                return self.cvt_four_to_three(img)

            raise LoadImageError(
                f"The channel({channel}) of the img is not in [1, 2, 3, 4]"
            )

        raise LoadImageError(f"The ndim({img.ndim}) of the img is not in [2, 3]")

    @staticmethod
    def cvt_two_to_three(img: np.ndarray) -> np.ndarray:
        """gray + alpha → BGR"""
        img_gray = img[..., 0]
        img_bgr = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)

        img_alpha = img[..., 1]
        not_a = cv2.bitwise_not(img_alpha)
        not_a = cv2.cvtColor(not_a, cv2.COLOR_GRAY2BGR)

        new_img = cv2.bitwise_and(img_bgr, img_bgr, mask=img_alpha)
        new_img = cv2.add(new_img, not_a)
        return new_img

    @staticmethod
    def cvt_four_to_three(img: np.ndarray) -> np.ndarray:
        """RGBA → BGR"""
        r, g, b, a = cv2.split(img)
        new_img = cv2.merge((b, g, r))

        not_a = cv2.bitwise_not(a)
        not_a = cv2.cvtColor(not_a, cv2.COLOR_GRAY2BGR)

        new_img = cv2.bitwise_and(new_img, new_img, mask=a)
        new_img = cv2.add(new_img, not_a)
        return new_img

    @staticmethod
    def verify_exist(file_path: Union[str, Path]):
        if not Path(file_path).exists():
            raise LoadImageError(f"{file_path} does not exist.")


class LoadImageError(Exception):
    pass


def plot_rec_box_with_logic_info(img_path, logic_points, sorted_polygons, without_text=True):
    """
    :param img_path: path of the source image
    :param logic_points: [row_start, row_end, col_start, col_end]
    :param sorted_polygons: [xmin, ymin, xmax, ymax]
    :param without_text: skip drawing the cell index next to each box
    :return:
    """
    # Read the original image and pad it on the right
    img = cv2.imread(img_path)
    img = cv2.copyMakeBorder(
        img, 0, 0, 0, 100, cv2.BORDER_CONSTANT, value=[255, 255, 255]
    )
    # Draw the polygon rectangles
    for idx, polygon in enumerate(sorted_polygons):
        x0, y0, x1, y1 = polygon[0], polygon[1], polygon[2], polygon[3]
        x0 = round(x0)
        y0 = round(y0)
        x1 = round(x1)
        y1 = round(y1)
        cv2.rectangle(img, (x0, y0), (x1, y1), (0, 0, 255), 1)
        if without_text:
            continue
        # Larger font size and line width (previously 0.5 and 1)
        font_scale = 1.0
        thickness = 2
        cv2.putText(
            img,
            f"{idx}",
            (x1, y1),
            cv2.FONT_HERSHEY_PLAIN,
            font_scale,
            (0, 0, 255),
            thickness,
        )
    return img


def plot_rec_box(img, sorted_polygons):
    """
    :param img: BGR image
    :param sorted_polygons: [xmin, ymin, xmax, ymax]
    :return:
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Pad the image on the right, then draw the boxes
    img = cv2.copyMakeBorder(
        img, 0, 0, 0, 100, cv2.BORDER_CONSTANT, value=[255, 255, 255]
    )
    for idx, polygon in enumerate(sorted_polygons):
        x0, y0, x1, y1 = polygon[0], polygon[1], polygon[2], polygon[3]
        x0 = round(x0)
        y0 = round(y0)
        x1 = round(x1)
        y1 = round(y1)
        cv2.rectangle(img, (x0, y0), (x1, y1), (0, 0, 255), 1)
    return img


def format_html(html: str):
    html = html.replace("<html>", "")
    html = html.replace("</html>", "")
    html = html.replace("<body>", "")
    html = html.replace("</body>", "")
    return f"""
    <!DOCTYPE html>
    <html lang="zh-CN">
    <head>
    <meta charset="UTF-8">
    <title>Complex Table Example</title>
    <style>
    table {{
        border-collapse: collapse;
        width: 100%;
    }}
    th, td {{
        border: 1px solid black;
        padding: 8px;
        text-align: center;
    }}
    th {{
        background-color: #f2f2f2;
    }}
    </style>
    </head>
    <body>
    {html}
    </body>
    </html>
    """


def box_4_2_poly_to_box_4_1(poly_box: Union[np.ndarray, list]) -> List[float]:
    """
    Convert a 4-point polygon box into a [xmin, ymin, xmax, ymax] box.
    :param poly_box:
    :return:
    """
    return [poly_box[0][0], poly_box[0][1], poly_box[2][0], poly_box[2][1]]
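For quick reference (again not part of the commit), the utils.py helpers can be exercised on their own; a small sketch assuming one of the example images from the Space is available:

# Minimal sketch (not part of this commit): standalone use of the utils.py helpers.
from utils import LoadImage, box_4_2_poly_to_box_4_1, format_html

loader = LoadImage()
img = loader("images/wired1.png")  # -> BGR numpy array; raises LoadImageError for bad input
print(img.shape)                   # e.g. (H, W, 3)

# 4-point cell polygon (clockwise from top-left) -> [xmin, ymin, xmax, ymax]
poly = [[10, 20], [110, 20], [110, 60], [10, 60]]
print(box_4_2_poly_to_box_4_1(poly))  # [10, 20, 110, 60]

# Wrap a bare <table> fragment in a styled, standalone HTML page
page = format_html("<table><tr><td>demo</td></tr></table>")
assert "<!DOCTYPE html>" in page and "<td>demo</td>" in page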