new model and fixed random seed
app.py CHANGED
@@ -6,6 +6,7 @@ import os
 
 from PIL import Image
 from io import BytesIO
+import transformers
 from transformers import VisionEncoderDecoderModel, VisionEncoderDecoderConfig, DonutProcessor, DonutImageProcessor, AutoTokenizer
 
 from logits_ngrams import NoRepeatNGramLogitsProcessor, get_table_token_ids
@@ -29,6 +30,7 @@ def run_prediction(sample, model, processor, mode):
             np.float32,
         ), return_tensors="pt").pixel_values
 
+    transformers.set_seed(42)
     with torch.no_grad():
         outputs = model.generate(
             pixel_values.to(device),
@@ -37,7 +39,7 @@ def run_prediction(sample, model, processor, mode):
             do_sample=True,
             top_p=0.92,
             top_k=5,
-            no_repeat_ngram_size=
+            no_repeat_ngram_size=10,
             num_beams=3,
             output_attentions=False,
             output_hidden_states=False,
@@ -81,7 +83,7 @@ else:
     st.image(image, caption='Your target document')
 
     with st.spinner(f'Processing the document ...'):
-        pre_trained_model = "unstructuredio/chipper-fast-fine-tuning
+        pre_trained_model = "unstructuredio/chipper-fast-fine-tuning"
        processor = DonutProcessor.from_pretrained(pre_trained_model, token=os.environ['HF_TOKEN'])
 
        device = "cuda" if torch.cuda.is_available() else "cpu"