caiocsr committed on
Commit
eab8f8d
·
verified ·
1 Parent(s): 7728083

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -8
app.py CHANGED
@@ -4,7 +4,10 @@ from huggingface_hub import InferenceClient
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
8
 
9
 
10
  def respond(
@@ -27,17 +30,16 @@ def respond(
27
 
28
  response = ""
29
 
30
- for message in client.chat_completion(
 
31
  messages,
32
  max_tokens=max_tokens,
33
- stream=True,
34
  temperature=temperature,
35
  top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
41
 
42
 
43
  """
@@ -61,4 +63,4 @@ demo = gr.ChatInterface(
61
 
62
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
+ #client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
+ #client = InferenceClient("meta-llama/Llama-3.2-1B-Instruct")
9
+ #client = InferenceClient("microsoft/Phi-3.5-mini-instruct")
10
+ client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")
11
 
12
 
13
  def respond(
 
30
 
31
  response = ""
32
 
33
+
34
+ mensagens = client.chat_completion(
35
  messages,
36
  max_tokens=max_tokens,
 
37
  temperature=temperature,
38
  top_p=top_p,
39
+ )
40
+ response = mensagens.choices[0].message.content
41
 
42
+ return response
 
43
 
44
 
45
  """
 
63
 
64
 
65
  if __name__ == "__main__":
66
+ demo.launch()