musadac committed on
Commit a4172ab
1 parent: 2c72c7b

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -80,21 +80,21 @@ processortext2 = CustomOCRProcessor(image_processor,tokenizer)
 import os
 huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
 model = {}
-model['single-urdu'] = VisionEncoderDecoderModel.from_pretrained("musadac/vilanocr-single-urdu", use_auth_token=huggingface_token)
-model['multi-urdu'] = VisionEncoderDecoderModel.from_pretrained("musadac/ViLanOCR", use_auth_token=huggingface_token)
-model['medical'] = VisionEncoderDecoderModel.from_pretrained("musadac/vilanocr-multi-medical", use_auth_token=huggingface_token)
-model['chinese'] = VisionEncoderDecoderModel.from_pretrained("musadac/vilanocr-single-chinese", use_auth_token=huggingface_token)
+model['single-urdu'] = "musadac/vilanocr-single-urdu"
+model['multi-urdu'] = "musadac/ViLanOCR"
+model['medical'] = "musadac/vilanocr-multi-medical"
+model['chinese'] = "musadac/vilanocr-single-chinese"
 
 st.title("Image OCR with musadac/vilanocr")
 model_name = st.selectbox("Choose an OCR model", ["single-urdu", "multi-urdu", "medical", "chinese"])
 uploaded_file = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])
 if uploaded_file is not None:
-
+    model2 = VisionEncoderDecoderModel.from_pretrained(model[model_name], use_auth_token=huggingface_token)
     img = Image.open(uploaded_file).convert("RGB")
     pixel_values = processortext2(img.convert("RGB"), return_tensors="pt").pixel_values
 
     with torch.no_grad():
-        generated_ids = model[model_name].generate(pixel_values)
+        generated_ids = model2.generate(pixel_values)
 
     result = processortext2.batch_decode(generated_ids, skip_special_tokens=True)[0]
     st.write("OCR Result:")