# bens_moveis/app.py
import os
import gradio as gr
import requests
import pandas as pd
from scipy import stats
import google.generativeai as genai
# Configure the Gemini client with your API key. The key is read from the
# environment instead of being hard-coded in the source (GOOGLE_API_KEY is an
# assumed variable name; set it in your shell or hosting secrets before running).
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
# Set up the model configuration
generation_config = {
    "temperature": 0.7,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 2048,
}
# Safety settings
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]
# Initialize the model
model = genai.GenerativeModel(
    model_name="gemini-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
def gemini(query):
    """Ask Gemini for the average price of `query` (in BRL) and the factors that drive price variation."""
    prompt_parts = [
        f"input: \"Informar o preço médio de {query} em Reais brasileiros e explicar as características que podem representar variação nos preços\"",
        "output: ",
        f"input: {query}",
        "output: ",
    ]
    response = model.generate_content(prompt_parts)
    return response.text
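
# The "amazon" and "fipe" branches in fetch_data_to_dataframe call helper
# functions that are not defined in this file. The stubs below are assumptions
# (hypothetical placeholders) so the module stays runnable; replace them with
# real integrations, e.g. an Amazon product-data API client and the FIPE
# vehicle-price tables API.
def fetch_amazon_data(query, token=None):
    # Placeholder: no Amazon integration is implemented here.
    return pd.DataFrame()

def fetch_fipe_data(query, token=None):
    # Placeholder: no FIPE integration is implemented here.
    return pd.DataFrame()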
def fetch_data_to_dataframe(query, limit=50, source="mercadolibre", token=None):
    """Fetch listings for `query` from the chosen source and return them as a DataFrame."""
    if source == "mercadolibre":
        BASE_URL = "https://api.mercadolibre.com/sites/MLB/search"
        params = {'q': query, 'limit': limit}
        response = requests.get(BASE_URL, params=params)
        data = response.json() if response.ok else {}
        if data.get('results'):
            df = pd.DataFrame(data['results'])
            df = df[['title', 'price', 'currency_id', 'condition', 'permalink']]
            df.columns = ['Title', 'Price', 'Currency', 'Condition', 'Link']
        else:
            df = pd.DataFrame()
    elif source == "amazon":
        df = fetch_amazon_data(query, token)
    elif source == "fipe":
        df = fetch_fipe_data(query, token)
    else:
        df = pd.DataFrame()

    # Post-process the DataFrame in the same way for all sources where applicable.
    # The filtering below targets the MercadoLibre schema; adjust as needed for
    # other sources.
    if not df.empty and 'Price' in df.columns:
        # Drop rows without a price and remove outliers with an absolute z-score
        # above 3 (a common rule of thumb; tune the threshold as needed).
        df = df.dropna(subset=['Price'])
        if len(df) > 1 and df['Price'].std() > 0:
            z_scores = stats.zscore(df['Price'])
            df = df[abs(z_scores) < 3]
    return df
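
# Quick local sanity check (a minimal sketch; "geladeira frost free" and
# df_teste are illustrative names, and the call assumes network access to the
# public Mercado Livre search endpoint). Uncomment to try it:
#
#   df_teste = fetch_data_to_dataframe("geladeira frost free", limit=10)
#   print(df_teste[['Title', 'Price']].head())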
def integrated_app(query):
    # Interpret the prompt with the Gemini model. Note that gemini() returns
    # free-form text, so it may need to be distilled into a cleaner search term.
    interpreted_response = gemini(query)

    # Fetch data from Mercado Livre based on the interpreted query.
    df = fetch_data_to_dataframe(interpreted_response, 50, source="mercadolibre")

    if df.empty:
        return "No data found", pd.DataFrame()
    else:
        # Summarize the fetched data; more processing could be added here
        # based on the Gemini response.
        median_price = df['Price'].median()
        return median_price, df
# Define the Gradio interface
iface = gr.Interface(
    fn=integrated_app,
    inputs=gr.Textbox(label="Digite sua consulta"),
    outputs=[gr.Textbox(label="Preço Mediano"), gr.Dataframe(label="Resultados da Pesquisa")],
    title="Análise Integrada de Bens",
    description="Esta aplicação combina a interpretação de prompts via modelo Gemini com a busca de dados no Mercado Livre para oferecer uma análise de preços e características de bens.",
)
# Launch the interface
iface.launch()