# Radiator_Car / app.py
import base64
import io
import os

import gradio as gr
from PIL import Image
from together import Together

# The Together client is created lazily so the app can start without a key
client = None


def initialize_client(api_key=None):
    """Create the Together client from an explicit key or the environment."""
    global client
    api_key = api_key or os.getenv("TOGETHER_API_KEY")
    if not api_key:
        raise ValueError("TOGETHER_API_KEY is not set.")
    client = Together(api_key=api_key)


def encode_image(image_path):
    """Re-encode the image as PNG and return it as base64 text."""
    with Image.open(image_path) as img:
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
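

# Optional safeguard (a sketch, not part of the original flow): very large
# uploads can trigger the "Request Entity Too Large" error handled below in
# bot_streaming. Downscaling before encoding keeps the payload small; the
# 1024px cap is an assumed value, not a documented API limit.
def encode_image_downscaled(image_path, max_side=1024):
    """Return a base64 PNG, resized so neither side exceeds max_side."""
    with Image.open(image_path) as img:
        if max(img.size) > max_side:
            img.thumbnail((max_side, max_side))  # in-place, keeps aspect ratio
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")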


def bot_streaming(image_path, history):
    """Stream the model's inspection of an uploaded radiator image into the chat."""
    max_new_tokens = 250
    temperature = 0.7
    if client is None:
        try:
            initialize_client()
        except Exception as e:
            history.append(("Error initializing client", str(e)))
            yield history
            return
    prompt = """
    You are an expert in computer vision and automotive inspection from image analysis.
    Inspect the vehicle's radiator core support for damage from a front-end collision.
    Check for bent, cracked, or misaligned components, including the core support
    bracket, mounting points, and frame sections. Confirm that the radiator is securely
    mounted, and look for signs of stress or deformation caused by an accident.
    If such damage is present, classify the vehicle as accidental; otherwise, classify
    it as not accidental.
    """
    messages = [{"role": "system", "content": prompt}]
    # Encode the image and attach it as a data URL in the user turn
    image_base64 = encode_image(image_path)
    messages.append({
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{image_base64}"},
            }
        ],
    })
    history = history + [("Image uploaded", "")]
    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-Vision-Free",
            messages=messages,
            max_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )
        # Accumulate streamed tokens and re-yield the chat history on each chunk
        response = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
                history[-1] = ("Image uploaded", response)
                yield history
        if not response:
            history[-1] = ("Image uploaded", "No response generated. Please try again.")
            yield history
    except Exception as e:
        error_message = (
            "The image is too large. Please try with a smaller image or compress the existing one."
            if "Request Entity Too Large" in str(e)
            else f"An error occurred: {str(e)}"
        )
        history[-1] = ("Image uploaded", error_message)
        yield history
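

# A hypothetical smoke test (not wired into the UI): iterating the generator
# drains it exactly as Gradio would. The key in TOGETHER_API_KEY and a local
# "radiator.png" are assumed here, purely for illustration:
#
#     for chat in bot_streaming("radiator.png", []):
#         print(chat[-1][1])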


# Set up the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Radiator Accident Detection")
    gr.Markdown("Upload an image of a radiator to determine whether it shows signs of an accident.")
    chatbot = gr.Chatbot()
    img = gr.Image(type="filepath", label="Upload Radiator Image")
    clear = gr.Button("Clear")

    img.upload(bot_streaming, inputs=[img, chatbot], outputs=chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch(debug=True)
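
# To run locally (a sketch; assumes the gradio and together packages are
# installed): set TOGETHER_API_KEY in the environment, then `python app.py`.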