# Source: artificialguybr's Hugging Face Space — app.py (commit 1f986fb, 4.1 kB)
import gradio as gr
import requests
import os
import base64
from PIL import Image
import io
import json
import copy
import secrets
from pathlib import Path
import re
# Helper functions
def resize_image(image_path, max_size=(800, 800), quality=85):
    """Load an image from disk, shrink it to fit within *max_size*, and
    return the re-encoded JPEG bytes.

    Args:
        image_path: Filesystem path to the source image.
        max_size: (width, height) bounding box; aspect ratio is preserved.
        quality: JPEG encoder quality (1-95).

    Returns:
        bytes: The resized image encoded as JPEG.
    """
    with Image.open(image_path) as img:
        # The UI accepts PNG uploads, which may be RGBA/LA/P mode; JPEG
        # cannot store alpha or palettes, so normalize to RGB first to
        # avoid an OSError from the encoder.
        if img.mode not in ("RGB", "L"):
            img = img.convert("RGB")
        img.thumbnail(max_size, Image.Resampling.LANCZOS)
        buffer = io.BytesIO()
        img.save(buffer, format="JPEG", quality=quality)
        return buffer.getvalue()
def filepath_to_base64(image_path):
    """Resize the image at *image_path* and return it as a JPEG data URI."""
    encoded = base64.b64encode(resize_image(image_path)).decode('utf-8')
    return f"data:image/jpeg;base64,{encoded}"
api_key = os.getenv('API_KEY')
# Main API-call function
def call_fuyu_8b_api(image_path, content, temperature=0.2, top_p=0.7, max_tokens=1024):
    """Send an image + prompt to the NVIDIA NVCF Fuyu-8B endpoint and
    return the concatenated streamed completion text.

    Args:
        image_path: Path to the image file to embed in the prompt.
        content: The user's text prompt.
        temperature: Sampling temperature (0-1).
        top_p: Nucleus-sampling probability mass (0-1).
        max_tokens: Maximum tokens to generate.

    Returns:
        str: The generated text, or "Error in API call" on a non-200 response.
    """
    image_base64 = filepath_to_base64(image_path)
    invoke_url = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/9f757064-657f-4c85-abd7-37a7a9b6ee11"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "accept": "text/event-stream",
        "content-type": "application/json",
    }
    payload = {
        "messages": [
            {
                # Fuyu expects the image inlined as an <img> tag in the message.
                "content": f"{content} <img src=\"{image_base64}\" />",
                "role": "user"
            }
        ],
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
        "stream": True
    }
    # Timeout added: (connect, read) — the original could hang forever on a
    # stalled connection, freezing the Gradio handler.
    response = requests.post(invoke_url, headers=headers, json=payload,
                             stream=True, timeout=(10, 120))
    if response.status_code != 200:
        print(f"Erro na requisição: {response.status_code}")
        try:
            error_details = response.json()
            print(error_details)
        except ValueError:
            # Body was not JSON; dump it raw for debugging.
            print(response.text)
        return "Error in API call"

    response_text = ""
    for line in response.iter_lines():
        if not line:
            continue
        decoded_line = line.decode('utf-8')
        # Server-sent events: payload lines are prefixed with "data: ".
        if not decoded_line.startswith('data: '):
            continue
        json_str = decoded_line[6:]
        # SSE streams conventionally terminate with "data: [DONE]",
        # which is not JSON — the original crashed here.
        if json_str.strip() == "[DONE]":
            break
        try:
            json_line = json.loads(json_str)
        except json.JSONDecodeError:
            # Skip any malformed/keep-alive chunk instead of aborting.
            continue
        content_parts = json_line.get("choices", [{}])[0].get("delta", {}).get("content", "")
        response_text += content_parts
    return response_text
# Gradio interface
def update_chatbot_with_response(chatbot, response):
    """Append the assistant's reply to the chat transcript in place.

    Mutates *chatbot* (a list of (speaker, text) pairs) and returns None,
    following the stdlib convention for in-place mutators.
    """
    chatbot += [("Assistant", response)]
def submit_response(chatbot, task_history, image_input, content_input, temperature_input, top_p_input, max_tokens_input):
    """Gradio click handler: forward the upload + prompt to the API and
    append the assistant's reply to the chat transcript.

    Args:
        chatbot: Current chat transcript (list of (speaker, text) pairs).
        task_history: Opaque session state, passed through unchanged.
        image_input: Path of the uploaded file, or None/falsy when absent.
        content_input: The user's prompt text.
        temperature_input / top_p_input / max_tokens_input: Sampling params.

    Returns:
        (chatbot, task_history) for Gradio to re-render.
    """
    if image_input:
        response = call_fuyu_8b_api(image_input, content_input,
                                    temperature_input, top_p_input,
                                    max_tokens_input)
    else:
        # The original passed the literal string "No image provided" as an
        # image path, which crashed inside Image.open(); fail soft with a
        # message in the chat instead.
        response = "Error in API call"
    update_chatbot_with_response(chatbot, response)
    return chatbot, task_history
def reset_state(task_history):
    """Empty the stored task history and return a fresh, blank chat.

    Returns:
        ([], task_history): a new empty transcript plus the (now empty)
        same task-history list object, so Gradio state stays bound.
    """
    del task_history[:]
    return [], task_history
# Custom CSS: constrain the Gradio page width for a tidier layout.
css = '''
.gradio-container{max-width:800px !important}
'''
# Build and launch the Gradio UI. Statement order below defines the
# rendered layout, so do not reorder components.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Fuyu-8B API Explorer with Enhanced Features")
    # Chat transcript shown to the user: a list of (speaker, text) pairs.
    chatbot = gr.Chatbot(label='Chatbot', elem_classes="control-height", height=520)
    # Per-session state threaded through the event handlers.
    task_history = gr.State([])
    with gr.Row():
        addfile_btn = gr.File(label="📁 Upload Image", file_types=["jpg", "png"])
        submit_btn = gr.Button("🚀 Submit")
        # NOTE(review): regen_btn is never wired to a .click() handler
        # below — it currently does nothing when pressed.
        regen_btn = gr.Button("🤔 Regenerate")
        empty_bin = gr.Button("🧹 Clear History")
    with gr.Accordion("Advanced Settings"):
        content_input = gr.Textbox(lines=2, placeholder="Enter your content here...", label="Content")
        temperature_input = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.2, label="Temperature")
        top_p_input = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="Top P")
        max_tokens_input = gr.Slider(minimum=1, maximum=1024, step=1, value=1024, label="Max Tokens")
    # Wire the Submit button to the API handler and the Clear button to reset.
    submit_btn.click(submit_response, [chatbot, task_history, addfile_btn, content_input, temperature_input, top_p_input, max_tokens_input], [chatbot, task_history])
    empty_bin.click(reset_state, [task_history], [chatbot, task_history])
demo.launch()