# NOTE(review): this file was recovered from a Hugging Face Spaces page that
# displayed "Runtime error"; the residual Spaces status text was converted to
# this comment so the module parses.
import torch
import streamlit as st
import numpy as np
from PIL import Image, ImageDraw
from transformers import pipeline
from tempfile import NamedTemporaryFile

# Fruit/vegetable image classifier: takes an image (or path) and returns a
# list of {label, score} dicts.
imagepipe = pipeline("image-classification", model="flatmoon102/fruits_and_vegetables_image_classification")
# Zero-shot object detector (OWL-ViT): finds bounding boxes for arbitrary
# free-text candidate labels.
detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
# --- Image classification section -------------------------------------------
uploaded_image_file = st.file_uploader("Choose an image file")
if uploaded_image_file is not None:
    # The classification pipeline is fed a file path, so persist the uploaded
    # bytes to a temporary file first. The pipeline call stays inside the
    # `with` block because the file is deleted when the context exits.
    # NOTE(review): NamedTemporaryFile reopened by name may fail on Windows —
    # fine on the Linux containers Spaces run on.
    with NamedTemporaryFile() as temp:
        temp.write(uploaded_image_file.getvalue())
        temp.seek(0)
        result = imagepipe(temp.name)
        st.write(result)
# --- Zero-shot object detection section --------------------------------------
st.title('Upload an image file to detection')
uploaded_image_zero_file = st.file_uploader("Choose an image file (zero)")
texts = st.text_input('tags')
if uploaded_image_zero_file is not None:
    image = Image.open(uploaded_image_zero_file)
    outputImage = np.array(image)
    # Use comma-separated labels from the 'tags' box when provided (this
    # input was previously collected but never read); otherwise fall back
    # to the original hard-coded demo labels.
    candidate_labels = [t.strip() for t in texts.split(',') if t.strip()] or ['eggs', 'apple', 'pear']
    predictions = detector(
        image,
        candidate_labels=candidate_labels,
    )
    st.image(outputImage)
    # The button block is nested inside the upload check so `predictions`
    # is guaranteed to be bound when it runs; previously the button could
    # fire on a rerun with no upload and raise a NameError.
    if st.button('apply tag'):
        draw = ImageDraw.Draw(image)
        for prediction in predictions:
            # Each prediction: {"box": {xmin, ymin, xmax, ymax}, "label", "score"}.
            box = prediction["box"]
            label = prediction["label"]
            score = prediction["score"]
            xmin, ymin, xmax, ymax = box.values()
            draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=3)
            draw.text((xmin, ymin), f"{label}: {round(score, 5)}", fill="white")
        # Show the annotated image and the raw predictions once all boxes
        # have been drawn.
        st.image(image)
        st.write(predictions)