# Source: Hugging Face Spaces app (page captured while the Space showed "Runtime error").
"""Streamlit app: upload an image and ask MiniCPM-Llama3-V-2.5 about it."""
import streamlit as st
import transformers
import torch
from transformers import AutoModel, AutoTokenizer

model_path = 'openbmb/MiniCPM-Llama3-V-2_5'

# Pick the best available device. The original script referenced `device`
# without ever defining it, which raised a NameError on startup.
if torch.cuda.is_available():
    device = 'cuda'
elif torch.backends.mps.is_available():
    device = 'mps'
else:
    device = 'cpu'

if 'int4' in model_path:
    if device == 'mps':
        print('Error: running int4 model with bitsandbytes on Mac is not supported right now.')
        exit()
    # int4 checkpoints are pre-quantized; load as-is (no fp16 cast).
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
else:
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype=torch.float16)
model = model.to(device=device)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model.eval()

st.title('Ollama Chatbot')

# File uploader for the image
image = st.file_uploader('Upload an image')

# Question to ask about the image. The original code passed an undefined
# `msgs` variable to model.chat, which raised a NameError on Submit.
question = st.text_input('Question', value='Describe this image.')

# Button to submit the image for analysis
if st.button('Submit'):
    if image is None:
        # Give the user feedback instead of silently doing nothing.
        st.warning('Please upload an image before submitting.')
    else:
        # Build the chat history expected by MiniCPM's chat() interface.
        msgs = [{'role': 'user', 'content': question}]
        # NOTE(review): model.chat for this model normally expects a PIL.Image;
        # the Streamlit UploadedFile is file-like, so confirm the remote-code
        # chat() accepts it (opening via PIL would add a new dependency here).
        response = model.chat(
            image=image,
            msgs=msgs,
            tokenizer=tokenizer,
            sampling=True,
            temperature=0.1
        )
        # Display the chatbot's response
        st.write(response)