ivelin committed on
Commit
81c6fa4
1 Parent(s): 0bf3b92

switch to best current checkpoint

Files changed (1): app.py +4 -3
app.py CHANGED

@@ -6,11 +6,12 @@ import torch
 import html
 from transformers import DonutProcessor, VisionEncoderDecoderModel
 
-pretrained_repo_name = "ivelin/donut-refexp-combined-v1"
+pretrained_repo_name = 'ivelin/donut-refexp-combined-v1'
+pretrained_revision = '41210d7c42a22e77711711ec45508a6b63ec380f' # IoU=0.42 # 'main' for latest revision
 print(f"Loading model checkpoint: {pretrained_repo_name}")
 
-processor = DonutProcessor.from_pretrained(pretrained_repo_name)
-model = VisionEncoderDecoderModel.from_pretrained(pretrained_repo_name)
+processor = DonutProcessor.from_pretrained(pretrained_repo_name, revision=pretrained_revision)
+model = VisionEncoderDecoderModel.from_pretrained(pretrained_repo_name, revision=pretrained_revision)
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)
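
For context on what this change does: the `revision` argument of `from_pretrained` accepts either a branch name such as "main" or a full commit hash, so the app is pinned to the checkpoint that currently performs best (IoU=0.42 per the inline comment) while leaving an easy path back to the latest weights. Below is a minimal, self-contained sketch of the same loading pattern; the variable names `repo`/`revision` and the final smoke-check print are illustrative additions, not part of app.py.

import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel

repo = "ivelin/donut-refexp-combined-v1"
# Pin to a specific Hub commit; pass "main" instead to track the latest revision.
revision = "41210d7c42a22e77711711ec45508a6b63ec380f"

processor = DonutProcessor.from_pretrained(repo, revision=revision)
model = VisionEncoderDecoderModel.from_pretrained(repo, revision=revision)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Illustrative smoke check: confirm which checkpoint loaded and where it runs.
print(f"Loaded {repo}@{revision} with "
      f"{sum(p.numel() for p in model.parameters()):,} parameters on {device}")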