the-future-dev committed on
Commit
3f375a2
1 Parent(s): ed236ae

first commit

Browse files
Files changed (3) hide show
  1. app.py +22 -0
  2. kosmos.py +33 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from PIL import Image
3
+ import kosmos
4
+
5
+ kosmos.load_model()
6
+ st.title('KOSMOS2 Model Evaluation')
7
+
8
+ uploaded_file = st.file_uploader("Choose an image...", type="jpg")
9
+
10
+ if uploaded_file is not None:
11
+ try:
12
+ image = Image.open(uploaded_file)
13
+ st.image(image, caption='Uploaded Image.', use_column_width=True)
14
+ st.write("")
15
+ prompt = st.text_input(f"Detect the main object in the image. The image is a")
16
+ if st.button('Classify'):
17
+ st.write(f"User: {prompt}")
18
+ with st.spinner('Processing...'):
19
+ label = kosmos.single_image_classification(image, prompt, 50)
20
+ st.write(f"Model: {label}")
21
+ except Exception as e:
22
+ st.error(f"An error occurred: {e}")
kosmos.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoProcessor, AutoModelForVision2Seq
2
+ from PIL import Image
3
+ from os import path
4
+ from torchvision.transforms import ToTensor
5
+
6
+ model_id = "microsoft/kosmos-2-patch14-224"
7
+ model = None
8
+ processor = None
9
+
10
+ def load_model():
11
+ global model, processor
12
+ model = AutoModelForVision2Seq.from_pretrained(model_id)
13
+ processor = AutoProcessor.from_pretrained(model_id)
14
+
15
+ def single_image_classification(image, prompt="", max_new_tokens=30):
16
+ if model is None or processor is None:
17
+ load_model()
18
+ inputs = processor(text=prompt, images=image, return_tensors="pt")
19
+
20
+ generated_ids = model.generate(
21
+ pixel_values=inputs["pixel_values"],
22
+ input_ids=inputs["input_ids"],
23
+ attention_mask=inputs["attention_mask"],
24
+ image_embeds=None,
25
+ image_embeds_position_mask=inputs["image_embeds_position_mask"],
26
+ use_cache=True,
27
+ max_new_tokens=max_new_tokens,
28
+ )
29
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
30
+
31
+ processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=True)
32
+
33
+ return processed_text
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
transformers
torch
torchvision
streamlit
Pillow