Update app.py
app.py CHANGED
@@ -3,32 +3,26 @@ from huggingface_hub import InferenceClient
 from dotenv import load_dotenv
 import os
 
-# Load environment variables from .env file
 load_dotenv()
 
-# Get the system message from environment variables
 system_message = os.getenv("SYSTEM_MESSAGE")
 
 client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
 
 def respond(message, history, max_tokens, temperature, top_p):
-
+
     messages = [{"role": "system", "content": system_message}]
 
-    # Add the conversation history to the messages list
     for user_msg, assistant_msg in history:
         if user_msg:
             messages.append({"role": "user", "content": user_msg})
         if assistant_msg:
             messages.append({"role": "assistant", "content": assistant_msg})
 
-    # Add the latest user message to the messages list
     messages.append({"role": "user", "content": message})
 
-    # Initialize an empty response string
     response = ""
 
-    # Generate the response using the Hugging Face InferenceClient
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -40,7 +34,6 @@ def respond(message, history, max_tokens, temperature, top_p):
             response += token
         yield response
 
-# Define the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -50,6 +43,5 @@ demo = gr.ChatInterface(
     ]
 )
 
-# Launch the Gradio app
 if __name__ == "__main__":
     demo.launch()
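For reference, the code in this diff keeps the system prompt out of the source: load_dotenv() runs at import time and the prompt is read with os.getenv("SYSTEM_MESSAGE"). Below is a minimal sketch of the expected setup, assuming a .env file sits next to app.py; the file name, prompt text, and standalone check are illustrative and not part of this commit.

# check_env.py -- hypothetical standalone check, not part of this repo.
# Assumes a .env file in the working directory containing a line such as:
#   SYSTEM_MESSAGE=You are a friendly chatbot.
from dotenv import load_dotenv
import os

load_dotenv()  # populate os.environ from .env, if the file is present
system_message = os.getenv("SYSTEM_MESSAGE")
print(system_message)  # None means the variable was not found; app.py would then send a None system message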