Spaces:
Runtime error
Runtime error
Update chain_app.py
Browse files- chain_app.py +23 -0
chain_app.py
CHANGED
@@ -5,16 +5,22 @@ from groq import Groq
|
|
5 |
import requests
|
6 |
from chainlit.input_widget import Select, Slider
|
7 |
import os
|
|
|
8 |
|
9 |
hf_token = os.environ.get("HF_TOKEN")
|
10 |
openai_api_key = os.environ.get('OPENAI_API_KEY')
|
11 |
groq_api_key = os.environ.get('GROQ_API_KEY')
|
|
|
12 |
|
13 |
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)
|
14 |
# hf_image_client = Client('Artin2009/image-generation')
|
15 |
openai_client = OpenAI(api_key=openai_api_key)
|
16 |
groq_client = Groq(api_key=groq_api_key)
|
17 |
|
|
|
|
|
|
|
|
|
18 |
# API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"
|
19 |
# headers = {"Authorization": f"Bearer {hf_token}"}
|
20 |
|
@@ -70,6 +76,10 @@ async def chat_profile():
|
|
70 |
name="Llama-3-70B",
|
71 |
markdown_description="Meta Open Source model Llama-2 with 70B parameters",
|
72 |
),
|
|
|
|
|
|
|
|
|
73 |
cl.ChatProfile(
|
74 |
name="Llama-3-8B",
|
75 |
markdown_description="Meta Open Source model Llama-2 with 7B parameters",
|
@@ -677,6 +687,19 @@ async def main(message: cl.Message):
|
|
677 |
# content=output[0]['sequence']
|
678 |
# ).send()
|
679 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
680 |
@cl.on_settings_update
|
681 |
async def setup_agent(settings):
|
682 |
print("on_settings_update", settings)
|
|
|
5 |
import requests
|
6 |
from chainlit.input_widget import Select, Slider
|
7 |
import os
|
8 |
+
import cohere
|
9 |
|
10 |
hf_token = os.environ.get("HF_TOKEN")
|
11 |
openai_api_key = os.environ.get('OPENAI_API_KEY')
|
12 |
groq_api_key = os.environ.get('GROQ_API_KEY')
|
13 |
+
cohere_api_key = os.environ.get('COHERE_API_KEY')
|
14 |
|
15 |
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)
|
16 |
# hf_image_client = Client('Artin2009/image-generation')
|
17 |
openai_client = OpenAI(api_key=openai_api_key)
|
18 |
groq_client = Groq(api_key=groq_api_key)
|
19 |
|
20 |
+
co = cohere.Client(
|
21 |
+
api_key=cohere_api_key, # This is your trial API key
|
22 |
+
)
|
23 |
+
|
24 |
# API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"
|
25 |
# headers = {"Authorization": f"Bearer {hf_token}"}
|
26 |
|
|
|
76 |
name="Llama-3-70B",
|
77 |
markdown_description="Meta Open Source model Llama-2 with 70B parameters",
|
78 |
),
|
79 |
+
cl.ChatProfile(
|
80 |
+
name='Aya-23B',
|
81 |
+
markdown_description='Cohere open sourced AI model with 23B parameters'
|
82 |
+
),
|
83 |
cl.ChatProfile(
|
84 |
name="Llama-3-8B",
|
85 |
markdown_description="Meta Open Source model Llama-2 with 7B parameters",
|
|
|
687 |
# content=output[0]['sequence']
|
688 |
# ).send()
|
689 |
|
690 |
+
elif chat_profile == 'Aya-23B':
|
691 |
+
stream = co.chat_stream(
|
692 |
+
model='c4ai-aya-23',
|
693 |
+
message=message.content,
|
694 |
+
temperature=0.3,
|
695 |
+
# chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
|
696 |
+
prompt_truncation='OFF',
|
697 |
+
connectors=[],
|
698 |
+
)
|
699 |
+
|
700 |
+
for event in stream:
|
701 |
+
if event.event_type == "text-generation":
|
702 |
+
await cl.Message(content=event.text).send()
|
703 |
@cl.on_settings_update
|
704 |
async def setup_agent(settings):
|
705 |
print("on_settings_update", settings)
|