Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,44 +1,59 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import requests
|
3 |
import os
|
4 |
-
import
|
5 |
-
|
6 |
|
7 |
-
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
|
8 |
api_token = os.environ.get("TOKEN")
|
|
|
9 |
headers = {"Authorization": f"Bearer {api_token}"}
|
10 |
-
|
|
|
|
|
11 |
def query(payload):
|
12 |
response = requests.post(API_URL, headers=headers, json=payload)
|
13 |
return response.json()
|
14 |
|
15 |
-
def
|
16 |
-
|
17 |
-
"inputs":
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
)
|
43 |
|
44 |
-
|
|
|
|
|
|
|
1 |
import os
|
2 |
+
import requests
|
3 |
+
import gradio as gr
|
4 |
|
|
|
5 |
api_token = os.environ.get("TOKEN")
|
6 |
+
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
|
7 |
headers = {"Authorization": f"Bearer {api_token}"}
|
8 |
+
|
9 |
+
|
10 |
+
|
11 |
def query(payload):
    """POST *payload* to the HF Inference API and return the decoded JSON.

    Parameters
    ----------
    payload : dict
        JSON request body, e.g. ``{"inputs": "<prompt>"}``.

    Returns
    -------
    The JSON-decoded response: on success a list of generation dicts,
    otherwise an error dict (e.g. while the model is still loading).
    """
    # BUG FIX: no timeout means a stalled API call hangs the Space
    # forever; 60s is generous for a single short generation.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    return response.json()
|
14 |
|
15 |
+
def analyze_sentiment(text, _query=None):
    """Classify *text* into one of four post categories using Llama-3-8B-Instruct.

    Parameters
    ----------
    text : str
        The post to classify.
    _query : callable, optional
        Override for the module-level ``query`` function (dependency
        injection for testing): takes the payload dict and returns the
        API's JSON response. Defaults to ``query``.

    Returns
    -------
    str
        One of ``'questions'``, ``'Chat Model/LLM'``, ``'Other'``,
        ``'Image Generation'``, or an ``"Erreur: ..."`` message when the
        API response is invalid or ambiguous.
    """
    call = query if _query is None else _query
    output = call({
        "inputs": f'''<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You're going to analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories: for posts that talk about chat models/LLM you'll return "Chatmodel/LLM", for posts that talk about image generation models you'll return "image_generation", for texts that ask for information from the community you'll return "questions".For texts about recent discoveries that don't fit into the previous categories, you'll return "other".

<|eot_id|>
<|start_header_id|>user<|end_header_id|>
{text}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>

'''
    })

    # BUG FIX: the original only *read* `response` inside this guard but
    # used it unconditionally below, so an error dict from the API (e.g.
    # "model loading") raised NameError. Report the bad payload instead.
    if not (isinstance(output, list) and len(output) > 0):
        return f"Erreur: Réponse ambiguë - '{output}'"

    response = output[0].get('generated_text', '').strip().lower()

    # generated_text echoes the prompt, so on a clean answer each label
    # appears exactly twice: once in the instructions, once in the reply
    # — presumably the reason for the `== 2` checks (TODO confirm against
    # the deployed model's `return_full_text` behaviour).
    questions = response.count('questions')
    # BUG FIX: `response` is lower-cased above, so counting the
    # mixed-case 'Chatmodel/LLM' was always 0; count the lower-cased label.
    chat_llm = response.count('chatmodel/llm')
    other = response.count('other')
    image_generation = response.count('image_generation')

    if questions == 2:
        return 'questions'
    elif chat_llm == 2:
        return 'Chat Model/LLM'
    elif other == 2:
        return 'Other'
    elif image_generation == 2:
        return 'Image Generation'
    else:
        return f"Erreur: Réponse ambiguë - '{response}'"
|
51 |
+
|
52 |
+
|
53 |
+
# Minimal Gradio UI: a single free-text input, and the predicted
# category (or error message) echoed back as text.
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs="text",
    outputs="text"
)

# Start the Space's web server (blocking call).
demo.launch()
|