alex-abb committed on
Commit
b11c8cd
·
verified ·
1 Parent(s): 57d46c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -34
app.py CHANGED
@@ -1,44 +1,59 @@
1
- import gradio as gr
2
- import requests
3
  import os
4
- import spaces
5
-
6
 
7
- API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
8
  api_token = os.environ.get("TOKEN")
 
9
  headers = {"Authorization": f"Bearer {api_token}"}
10
- @spaces.GPU
 
 
11
  def query(payload):
12
  response = requests.post(API_URL, headers=headers, json=payload)
13
  return response.json()
14
 
15
- def generate_response(prompt):
16
- payload = {
17
- "inputs": prompt,
18
- "parameters": {
19
- "max_new_tokens": 100,
20
- "temperature": 0.7,
21
- "top_p": 0.95,
22
- "do_sample": True
23
- }
24
- }
25
-
26
- response = query(payload)
27
-
28
- if isinstance(response, list) and len(response) > 0:
29
- return response[0].get('generated_text', '')
30
- elif isinstance(response, dict) and 'generated_text' in response:
31
- return response['generated_text']
32
- return "Désolé, je n'ai pas pu générer de réponse."
33
-
34
- def chatbot(message, history):
35
- response = generate_response(message)
36
- return response
37
-
38
- iface = gr.ChatInterface(
39
- fn=chatbot,
40
- title="Chatbot Meta-Llama-3-8B-Instruct",
41
- description="Interagissez avec le modèle Meta-Llama-3-8B-Instruct."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  )
43
 
44
- iface.launch()
 
 
 
1
  import os
2
+ import requests
3
+ import gradio as gr
4
 
 
5
  api_token = os.environ.get("TOKEN")
6
+ API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
7
  headers = {"Authorization": f"Bearer {api_token}"}
8
+
9
+
10
+
11
  def query(payload):
12
  response = requests.post(API_URL, headers=headers, json=payload)
13
  return response.json()
14
 
15
+ def analyze_sentiment(text):
16
+ output = query({
17
+ "inputs": f'''<|begin_of_text|>
18
+ <|start_header_id|>system<|end_header_id|>
19
+ You're going to analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories: for posts that talk about chat models/LLM you'll return "Chatmodel/LLM", for posts that talk about image generation models you'll return "image_generation", for texts that ask for information from the community you'll return "questions".For texts about recent discoveries that don't fit into the previous categories, you'll return "other".
20
+
21
+ <|eot_id|>
22
+ <|start_header_id|>user<|end_header_id|>
23
+ {text}
24
+ <|eot_id|>
25
+ <|start_header_id|>assistant<|end_header_id|>
26
+
27
+ '''
28
+ })
29
+
30
+
31
+
32
+ if isinstance(output, list) and len(output) > 0:
33
+ response = output[0].get('generated_text', '').strip().lower()
34
+
35
+
36
+ questions = response.count('questions')
37
+ ChatmodelLLM = response.count('Chatmodel/LLM')
38
+ other = response.count('other')
39
+ image_generation = response.count("image_generation")
40
+
41
+ if questions == 2:
42
+ return 'questions'
43
+ elif ChatmodelLLM == 2:
44
+ return 'Chat Model/LLM'
45
+ elif other == 2 :
46
+ return "Other"
47
+ elif image_generation == 2 :
48
+ return "Image Generation"
49
+ else :
50
+ return f"Erreur: Réponse ambiguë - '{response}'"
51
+
52
+
53
+ demo = gr.Interface(
54
+ fn=analyze_sentiment,
55
+ inputs="text",
56
+ outputs="text"
57
  )
58
 
59
+ demo.launch()