import gradio as gr
import requests
import spaces

API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
# Placeholder token: replace with a valid Hugging Face access token before running.
headers = {"Authorization": "Bearer hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
@spaces.GPU  # Not strictly required here: inference runs on the remote Inference API, not a local GPU
def analyze_sentiment(text):
    payload = {
        "inputs": f"Analyze the sentiment of the following text and respond with either 'heureux' or 'malheureux': {text}",
        # Return only the completion, not the echoed prompt
        # (otherwise both labels would always appear in the output).
        "parameters": {"return_full_text": False},
    }
    try:
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()  # Check that the request succeeded
        result = response.json()
        if isinstance(result, list) and len(result) > 0 and 'generated_text' in result[0]:
            sentiment = result[0]['generated_text'].strip().lower()
            # Check "malheureux" first: "heureux" is a substring of "malheureux",
            # so testing "heureux" first would misclassify negative replies as positive.
            if "malheureux" in sentiment:
                return "malheureux"
            return "heureux" if "heureux" in sentiment else "malheureux"
        else:
            return "Erreur: Format de réponse inattendu"
    except requests.exceptions.RequestException as e:
        return f"Erreur de requête: {str(e)}"
    except Exception as e:
        return f"Erreur inattendue: {str(e)}"
def gradio_interface(input_text):
    return analyze_sentiment(input_text)

iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Textbox(lines=3, placeholder="Entrez votre texte ici..."),
    outputs=gr.Label(num_top_classes=1),
    title="Analyseur de Sentiment",
    description="Entrez un texte pour déterminer si le sentiment est 'heureux' ou 'malheureux'."
)

# Note: share=True is ignored when the app runs on Hugging Face Spaces (it is already publicly hosted).
iface.launch(share=True)