Felix92 committed on
Commit
d6cd37a
1 Parent(s): 12e901f

Add doctr-dummy-tf-master model

Browse files
.gitattributes CHANGED
@@ -30,3 +30,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
  *.zip filter=lfs diff=lfs merge=lfs -text
31
  *.zst filter=lfs diff=lfs merge=lfs -text
32
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
30
  *.zip filter=lfs diff=lfs merge=lfs -text
31
  *.zst filter=lfs diff=lfs merge=lfs -text
32
  *tfevents* filter=lfs diff=lfs merge=lfs -text
33
+ tf_model/weights.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
34
+ tf_model/weights.index filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---
3
+ language: en
4
+ ---
5
+
6
+ <p align="center">
7
+ <img src="https://doctr-static.mindee.com/models?id=v0.3.1/Logo_doctr.gif&src=0" width="60%">
8
+ </p>
9
+
10
+ **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
11
+
12
+ ## Task: recognition
13
+
14
+ https://github.com/mindee/doctr
15
+
16
+ ### Example usage:
17
+
18
+ ```python
19
+ >>> from doctr.io import DocumentFile
20
+ >>> from doctr.models import ocr_predictor, from_hub
21
+
22
+ >>> img = DocumentFile.from_images(['<image_path>'])
23
+ >>> # Load your model from the hub
24
+ >>> model = from_hub('mindee/my-model')
25
+
26
+ >>> # Pass it to the predictor
27
+ >>> # If your model is a recognition model:
28
+ >>> predictor = ocr_predictor(det_arch='db_mobilenet_v3_large',
29
+ >>> reco_arch=model,
30
+ >>> pretrained=True)
31
+
32
+ >>> # If your model is a detection model:
33
+ >>> predictor = ocr_predictor(det_arch=model,
34
+ >>> reco_arch='crnn_mobilenet_v3_small',
35
+ >>> pretrained=True)
36
+
37
+ >>> # Get your predictions
38
+ >>> res = predictor(img)
39
+ ```
config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "mean": [
3
+ 0.694,
4
+ 0.695,
5
+ 0.693
6
+ ],
7
+ "std": [
8
+ 0.299,
9
+ 0.296,
10
+ 0.301
11
+ ],
12
+ "input_shape": [
13
+ 32,
14
+ 128,
15
+ 3
16
+ ],
17
+ "vocab": "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~°£€¥¢฿àâéèêëîïôùûüçÀÂÉÈÊËÎÏÔÙÛÜÇ",
18
+ "url": null,
19
+ "arch": "master",
20
+ "task": "recognition"
21
+ }
tf_model/checkpoint ADDED
@@ -0,0 +1,2 @@
 
 
1
+ model_checkpoint_path: "weights"
2
+ all_model_checkpoint_paths: "weights"
tf_model/weights.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b45111c9afb15b74fa4e95fef0e83cd687cb7e6382df65ea552515c86fdb3cb3
3
+ size 184371563
tf_model/weights.index ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f31cc21eb8ab12045f4a65684adf60608b131c552aca19b502ee70090e01cea
3
+ size 12847