nornorr committed
Commit dae1a39
1 Parent(s): edc1f86

Update app.py

Files changed (1)
  1. app.py +6 -29
app.py CHANGED
@@ -1,43 +1,20 @@
 import streamlit as st
 from transformers import pipeline
 
-classifier = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
+classifier = pipeline("token-classification", model="samrawal/medical-sentence-tokenizer")
 def main():
-    st.title("image-to-text")
+    st.title("Token classification")
 
-    with st.form("image"):
-        image = st.file_uploader('Choose a file')
+    with st.form("text_field"):
+        text = st.text_area('enter some text:')
         # clicked==True only when the button is clicked
         clicked = st.form_submit_button("Submit")
         if clicked:
-            results = classifier([image])
+            results = classifier([text])
             st.json(results)
-from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
-
-model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
-max_length = 16
-num_beams = 4
-gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
-def predict_step(image_paths):
-    images = []
-    for image_path in image_paths:
-        i_image = Image.open(image_path)
-        if i_image.mode != "RGB":
-            i_image = i_image.convert(mode="RGB")
-        images.append(i_image)
-    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
-    pixel_values = pixel_values.to(device)
-    output_ids = model.generate(pixel_values, **gen_kwargs)
-    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
-    preds = [pred.strip() for pred in preds]
-    return preds
-predict_step(['doctor.e16ba4e4.jpg']
 
 if __name__ == "__main__":
     main()
 
+
 """'audio-classification', 'automatic-speech-recognition', 'conversational', 'document-question-answering', 'feature-extraction', 'fill-mask', 'image-classification', 'image-segmentation', 'image-to-text', 'ner', 'object-detection', 'question-answering', 'sentiment-analysis', 'summarization', 'table-question-answering', 'text-classification', 'text-generation', 'text2text-generation', 'token-classification', 'translation', 'visual-question-answering', 'vqa', 'zero-shot-classification', 'zero-shot-image-classification', 'translation_XX_to_YY'"""
 