adirathor07 committed
Commit 153628e
Parent(s): 6e88fde

added doctr folder

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- doctr/__init__.py +3 -0
- doctr/__pycache__/__init__.cpython-310.pyc +0 -0
- doctr/__pycache__/__init__.cpython-311.pyc +0 -0
- doctr/__pycache__/__init__.cpython-38.pyc +0 -0
- doctr/__pycache__/file_utils.cpython-310.pyc +0 -0
- doctr/__pycache__/file_utils.cpython-311.pyc +0 -0
- doctr/__pycache__/file_utils.cpython-38.pyc +0 -0
- doctr/__pycache__/version.cpython-311.pyc +0 -0
- doctr/__pycache__/version.cpython-38.pyc +0 -0
- doctr/contrib/__init__.py +0 -0
- doctr/contrib/__pycache__/__init__.cpython-311.pyc +0 -0
- doctr/contrib/__pycache__/__init__.cpython-38.pyc +0 -0
- doctr/contrib/artefacts.py +131 -0
- doctr/contrib/base.py +105 -0
- doctr/datasets/__init__.py +26 -0
- doctr/datasets/__pycache__/__init__.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/__init__.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/cord.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/cord.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/detection.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/detection.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/doc_artefacts.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/doc_artefacts.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/funsd.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/funsd.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/ic03.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/ic03.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/ic13.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/ic13.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/iiit5k.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/iiit5k.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/iiithws.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/iiithws.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/imgur5k.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/imgur5k.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/loader.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/loader.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/mjsynth.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/mjsynth.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/ocr.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/ocr.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/orientation.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/orientation.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/recognition.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/recognition.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/sroie.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/sroie.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/svhn.cpython-311.pyc +0 -0
- doctr/datasets/__pycache__/svhn.cpython-38.pyc +0 -0
- doctr/datasets/__pycache__/svt.cpython-311.pyc +0 -0
doctr/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from . import io, models, datasets, contrib, transforms, utils
+from .file_utils import is_tf_available, is_torch_available
+from .version import __version__  # noqa: F401
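
Note: the package root re-exports the backend helpers from file_utils, so downstream code can branch on whichever deep-learning framework is installed. A minimal sketch of that pattern (the variable and messages are illustrative, not part of this commit):

# Minimal sketch: branch on the helpers re-exported by doctr/__init__.py.
from doctr import is_tf_available, is_torch_available

if is_torch_available():
    backend = "torch"
elif is_tf_available():
    backend = "tensorflow"
else:
    raise RuntimeError("docTR expects either PyTorch or TensorFlow to be installed")
print(f"Running with the {backend} backend")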
doctr/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (382 Bytes).

doctr/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (499 Bytes).

doctr/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (399 Bytes).

doctr/__pycache__/file_utils.cpython-310.pyc
ADDED
Binary file (2.56 kB).

doctr/__pycache__/file_utils.cpython-311.pyc
ADDED
Binary file (4.23 kB).

doctr/__pycache__/file_utils.cpython-38.pyc
ADDED
Binary file (2.59 kB).

doctr/__pycache__/version.cpython-311.pyc
ADDED
Binary file (198 Bytes).

doctr/__pycache__/version.cpython-38.pyc
ADDED
Binary file (181 Bytes).
doctr/contrib/__init__.py
ADDED
File without changes
doctr/contrib/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (185 Bytes).

doctr/contrib/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (167 Bytes).
doctr/contrib/artefacts.py
ADDED
@@ -0,0 +1,131 @@
+# Copyright (C) 2021-2024, Mindee.
+
+# This program is licensed under the Apache License 2.0.
+# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
+
+from typing import Any, Dict, List, Optional, Tuple
+
+import cv2
+import numpy as np
+
+from doctr.file_utils import requires_package
+
+from .base import _BasePredictor
+
+__all__ = ["ArtefactDetector"]
+
+default_cfgs: Dict[str, Dict[str, Any]] = {
+    "yolov8_artefact": {
+        "input_shape": (3, 1024, 1024),
+        "labels": ["bar_code", "qr_code", "logo", "photo"],
+        "url": "https://doctr-static.mindee.com/models?id=v0.8.1/yolo_artefact-f9d66f14.onnx&src=0",
+    },
+}
+
+
+class ArtefactDetector(_BasePredictor):
+    """
+    A class to detect artefacts in images
+
+    >>> from doctr.io import DocumentFile
+    >>> from doctr.contrib.artefacts import ArtefactDetector
+    >>> doc = DocumentFile.from_images(["path/to/image.jpg"])
+    >>> detector = ArtefactDetector()
+    >>> results = detector(doc)
+
+    Args:
+    ----
+        arch: the architecture to use
+        batch_size: the batch size to use
+        model_path: the path to the model to use
+        labels: the labels to use
+        input_shape: the input shape to use
+        mask_labels: the mask labels to use
+        conf_threshold: the confidence threshold to use
+        iou_threshold: the intersection over union threshold to use
+        **kwargs: additional arguments to be passed to `download_from_url`
+    """
+
+    def __init__(
+        self,
+        arch: str = "yolov8_artefact",
+        batch_size: int = 2,
+        model_path: Optional[str] = None,
+        labels: Optional[List[str]] = None,
+        input_shape: Optional[Tuple[int, int, int]] = None,
+        conf_threshold: float = 0.5,
+        iou_threshold: float = 0.5,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(batch_size=batch_size, url=default_cfgs[arch]["url"], model_path=model_path, **kwargs)
+        self.labels = labels or default_cfgs[arch]["labels"]
+        self.input_shape = input_shape or default_cfgs[arch]["input_shape"]
+        self.conf_threshold = conf_threshold
+        self.iou_threshold = iou_threshold
+
+    def preprocess(self, img: np.ndarray) -> np.ndarray:
+        return np.transpose(cv2.resize(img, (self.input_shape[2], self.input_shape[1])), (2, 0, 1)) / np.array(255.0)
+
+    def postprocess(self, output: List[np.ndarray], input_images: List[List[np.ndarray]]) -> List[List[Dict[str, Any]]]:
+        results = []
+
+        for batch in zip(output, input_images):
+            for out, img in zip(batch[0], batch[1]):
+                org_height, org_width = img.shape[:2]
+                width_scale, height_scale = org_width / self.input_shape[2], org_height / self.input_shape[1]
+                for res in out:
+                    sample_results = []
+                    for row in np.transpose(np.squeeze(res)):
+                        classes_scores = row[4:]
+                        max_score = np.amax(classes_scores)
+                        if max_score >= self.conf_threshold:
+                            class_id = np.argmax(classes_scores)
+                            x, y, w, h = row[0], row[1], row[2], row[3]
+                            # to rescaled xmin, ymin, xmax, ymax
+                            xmin = int((x - w / 2) * width_scale)
+                            ymin = int((y - h / 2) * height_scale)
+                            xmax = int((x + w / 2) * width_scale)
+                            ymax = int((y + h / 2) * height_scale)
+
+                            sample_results.append({
+                                "label": self.labels[class_id],
+                                "confidence": float(max_score),
+                                "box": [xmin, ymin, xmax, ymax],
+                            })
+
+                    # Filter out overlapping boxes
+                    boxes = [res["box"] for res in sample_results]
+                    scores = [res["confidence"] for res in sample_results]
+                    keep_indices = cv2.dnn.NMSBoxes(boxes, scores, self.conf_threshold, self.iou_threshold)  # type: ignore[arg-type]
+                    sample_results = [sample_results[i] for i in keep_indices]
+
+                    results.append(sample_results)
+
+        self._results = results
+        return results
+
+    def show(self, **kwargs: Any) -> None:
+        """
+        Display the results
+
+        Args:
+        ----
+            **kwargs: additional keyword arguments to be passed to `plt.show`
+        """
+        requires_package("matplotlib", "`.show()` requires matplotlib installed")
+        import matplotlib.pyplot as plt
+        from matplotlib.patches import Rectangle
+
+        # visualize the results with matplotlib
+        if self._results and self._inputs:
+            for img, res in zip(self._inputs, self._results):
+                plt.figure(figsize=(10, 10))
+                plt.imshow(img)
+                for obj in res:
+                    xmin, ymin, xmax, ymax = obj["box"]
+                    label = obj["label"]
+                    plt.text(xmin, ymin, f"{label} {obj['confidence']:.2f}", color="red")
+                    plt.gca().add_patch(
+                        Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, edgecolor="red", linewidth=2)
+                    )
+                plt.show(**kwargs)
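
The docstring above already sketches construction; for reference, a slightly fuller usage sketch of the new ArtefactDetector, assuming onnxruntime and matplotlib are installed and using "sample.jpg" as a placeholder image path:

# Hypothetical end-to-end usage of the ArtefactDetector added in this commit.
from doctr.io import DocumentFile
from doctr.contrib.artefacts import ArtefactDetector

pages = DocumentFile.from_images(["sample.jpg"])  # placeholder path
detector = ArtefactDetector(conf_threshold=0.5, iou_threshold=0.5)

results = detector(pages)  # one list of detections per input page
for page_results in results:
    for det in page_results:
        print(det["label"], det["confidence"], det["box"])

detector.show()  # draws the kept boxes with matplotlib

The ONNX weights are fetched from the URL in default_cfgs on first use unless model_path points to a local file.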
doctr/contrib/base.py
ADDED
@@ -0,0 +1,105 @@
+# Copyright (C) 2021-2024, Mindee.
+
+# This program is licensed under the Apache License 2.0.
+# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
+
+from typing import Any, List, Optional
+
+import numpy as np
+
+from doctr.file_utils import requires_package
+from doctr.utils.data import download_from_url
+
+
+class _BasePredictor:
+    """
+    Base class for all predictors
+
+    Args:
+    ----
+        batch_size: the batch size to use
+        url: the url to use to download a model if needed
+        model_path: the path to the model to use
+        **kwargs: additional arguments to be passed to `download_from_url`
+    """
+
+    def __init__(self, batch_size: int, url: Optional[str] = None, model_path: Optional[str] = None, **kwargs) -> None:
+        self.batch_size = batch_size
+        self.session = self._init_model(url, model_path, **kwargs)
+
+        self._inputs: List[np.ndarray] = []
+        self._results: List[Any] = []
+
+    def _init_model(self, url: Optional[str] = None, model_path: Optional[str] = None, **kwargs: Any) -> Any:
+        """
+        Download the model from the given url if needed
+
+        Args:
+        ----
+            url: the url to use
+            model_path: the path to the model to use
+            **kwargs: additional arguments to be passed to `download_from_url`
+
+        Returns:
+        -------
+            Any: the ONNX loaded model
+        """
+        requires_package("onnxruntime", "`.contrib` module requires `onnxruntime` to be installed.")
+        import onnxruntime as ort
+
+        if not url and not model_path:
+            raise ValueError("You must provide either a url or a model_path")
+        onnx_model_path = model_path if model_path else str(download_from_url(url, cache_subdir="models", **kwargs))  # type: ignore[arg-type]
+        return ort.InferenceSession(onnx_model_path, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
+
+    def preprocess(self, img: np.ndarray) -> np.ndarray:
+        """
+        Preprocess the input image
+
+        Args:
+        ----
+            img: the input image to preprocess
+
+        Returns:
+        -------
+            np.ndarray: the preprocessed image
+        """
+        raise NotImplementedError
+
+    def postprocess(self, output: List[np.ndarray], input_images: List[List[np.ndarray]]) -> Any:
+        """
+        Postprocess the model output
+
+        Args:
+        ----
+            output: the model output to postprocess
+            input_images: the input images used to generate the output
+
+        Returns:
+        -------
+            Any: the postprocessed output
+        """
+        raise NotImplementedError
+
+    def __call__(self, inputs: List[np.ndarray]) -> Any:
+        """
+        Call the model on the given inputs
+
+        Args:
+        ----
+            inputs: the inputs to use
+
+        Returns:
+        -------
+            Any: the postprocessed output
+        """
+        self._inputs = inputs
+        model_inputs = self.session.get_inputs()
+
+        batched_inputs = [inputs[i : i + self.batch_size] for i in range(0, len(inputs), self.batch_size)]
+        processed_batches = [
+            np.array([self.preprocess(img) for img in batch], dtype=np.float32) for batch in batched_inputs
+        ]
+
+        outputs = [self.session.run(None, {model_inputs[0].name: batch}) for batch in processed_batches]
+        return self.postprocess(outputs, batched_inputs)
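
_BasePredictor fixes the contract the contrib predictors follow: _init_model loads an ONNX InferenceSession, and __call__ splits the inputs into batches, runs preprocess on each image, feeds each batch to the session, and passes the raw outputs together with the batched inputs to postprocess. A minimal hypothetical subclass to illustrate that contract (the class name, the 512x512 input size, and "my_model.onnx" are placeholders, not part of this commit):

# Hypothetical subclass sketch following the _BasePredictor contract.
from typing import Any, List

import cv2
import numpy as np

from doctr.contrib.base import _BasePredictor


class RawOutputPredictor(_BasePredictor):
    """Resizes inputs to a fixed CHW shape and returns the raw ONNX outputs."""

    def preprocess(self, img: np.ndarray) -> np.ndarray:
        # HWC uint8 -> CHW float in [0, 1], resized to an assumed 512 x 512 model input
        resized = cv2.resize(img, (512, 512))
        return np.transpose(resized, (2, 0, 1)) / 255.0

    def postprocess(self, output: List[np.ndarray], input_images: List[List[np.ndarray]]) -> Any:
        # Trivial postprocessing: hand back the per-batch session outputs unchanged
        return output


predictor = RawOutputPredictor(batch_size=2, model_path="my_model.onnx")  # placeholder ONNX file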
doctr/datasets/__init__.py
ADDED
@@ -0,0 +1,26 @@
+from doctr.file_utils import is_tf_available
+
+from .generator import *
+from .cord import *
+from .detection import *
+from .doc_artefacts import *
+from .funsd import *
+from .ic03 import *
+from .ic13 import *
+from .iiit5k import *
+from .iiithws import *
+from .imgur5k import *
+from .mjsynth import *
+from .ocr import *
+from .recognition import *
+from .orientation import *
+from .sroie import *
+from .svhn import *
+from .svt import *
+from .synthtext import *
+from .utils import *
+from .vocabs import *
+from .wildreceipt import *
+
+if is_tf_available():
+    from .loader import *
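
The wildcard imports expose every dataset class directly under doctr.datasets, with the TensorFlow-only loader pulled in only when is_tf_available() is true. A hedged usage sketch, assuming a backend is installed and network access is available for the first download:

# Hypothetical dataset usage; download=True fetches FUNSD into the local cache on first use.
from doctr.datasets import FUNSD

train_set = FUNSD(train=True, download=True)
img, target = train_set[0]
print(len(train_set), sorted(target.keys()))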
doctr/datasets/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (949 Bytes).

doctr/datasets/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (655 Bytes).

doctr/datasets/__pycache__/cord.cpython-311.pyc
ADDED
Binary file (6.39 kB).

doctr/datasets/__pycache__/cord.cpython-38.pyc
ADDED
Binary file (3.5 kB).

doctr/datasets/__pycache__/detection.cpython-311.pyc
ADDED
Binary file (6.35 kB).

doctr/datasets/__pycache__/detection.cpython-38.pyc
ADDED
Binary file (3.75 kB).

doctr/datasets/__pycache__/doc_artefacts.cpython-311.pyc
ADDED
Binary file (5.45 kB).

doctr/datasets/__pycache__/doc_artefacts.cpython-38.pyc
ADDED
Binary file (3.12 kB).

doctr/datasets/__pycache__/funsd.cpython-311.pyc
ADDED
Binary file (6.27 kB).

doctr/datasets/__pycache__/funsd.cpython-38.pyc
ADDED
Binary file (3.63 kB).

doctr/datasets/__pycache__/ic03.cpython-311.pyc
ADDED
Binary file (7.13 kB).

doctr/datasets/__pycache__/ic03.cpython-38.pyc
ADDED
Binary file (3.93 kB).

doctr/datasets/__pycache__/ic13.cpython-311.pyc
ADDED
Binary file (6.18 kB).

doctr/datasets/__pycache__/ic13.cpython-38.pyc
ADDED
Binary file (3.78 kB).

doctr/datasets/__pycache__/iiit5k.cpython-311.pyc
ADDED
Binary file (5.34 kB).

doctr/datasets/__pycache__/iiit5k.cpython-38.pyc
ADDED
Binary file (3.36 kB).

doctr/datasets/__pycache__/iiithws.cpython-311.pyc
ADDED
Binary file (4.14 kB).

doctr/datasets/__pycache__/iiithws.cpython-38.pyc
ADDED
Binary file (2.67 kB).

doctr/datasets/__pycache__/imgur5k.cpython-311.pyc
ADDED
Binary file (10.5 kB).

doctr/datasets/__pycache__/imgur5k.cpython-38.pyc
ADDED
Binary file (5.51 kB).

doctr/datasets/__pycache__/loader.cpython-311.pyc
ADDED
Binary file (4.77 kB).

doctr/datasets/__pycache__/loader.cpython-38.pyc
ADDED
Binary file (3.18 kB).

doctr/datasets/__pycache__/mjsynth.cpython-311.pyc
ADDED
Binary file (5.16 kB).

doctr/datasets/__pycache__/mjsynth.cpython-38.pyc
ADDED
Binary file (3.81 kB).

doctr/datasets/__pycache__/ocr.cpython-311.pyc
ADDED
Binary file (4.35 kB).

doctr/datasets/__pycache__/ocr.cpython-38.pyc
ADDED
Binary file (2.47 kB).

doctr/datasets/__pycache__/orientation.cpython-311.pyc
ADDED
Binary file (1.9 kB).

doctr/datasets/__pycache__/orientation.cpython-38.pyc
ADDED
Binary file (1.4 kB).

doctr/datasets/__pycache__/recognition.cpython-311.pyc
ADDED
Binary file (3.64 kB).

doctr/datasets/__pycache__/recognition.cpython-38.pyc
ADDED
Binary file (2.14 kB).

doctr/datasets/__pycache__/sroie.cpython-311.pyc
ADDED
Binary file (6.43 kB).

doctr/datasets/__pycache__/sroie.cpython-38.pyc
ADDED
Binary file (3.73 kB).

doctr/datasets/__pycache__/svhn.cpython-311.pyc
ADDED
Binary file (7.8 kB).

doctr/datasets/__pycache__/svhn.cpython-38.pyc
ADDED
Binary file (4.23 kB).

doctr/datasets/__pycache__/svt.cpython-311.pyc
ADDED
Binary file (6.8 kB).