import chainlit as cl from gradio_client import Client from openai import OpenAI from groq import Groq import requests from chainlit.input_widget import Select, Slider import os import cohere from huggingface_hub import InferenceClient hf_token = os.environ.get("HF_TOKEN") hf_token_llama_3_1 = os.environ.get('HF_TOKEN_FOR_31') openai_api_key = os.environ.get('OPENAI_API_KEY') groq_api_key = os.environ.get('GROQ_API_KEY') cohere_api_key = os.environ.get('COHERE_API_KEY') hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token) # hf_image_client = Client('Artin2009/image-generation') openai_client = OpenAI(api_key=openai_api_key) groq_client = Groq(api_key=groq_api_key) co = cohere.Client( api_key=cohere_api_key, ) # API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large" # headers = {"Authorization": f"Bearer {hf_token}"} # def query(payload): # response = requests.post(API_URL, headers=headers, json=payload) # return response.json() @cl.set_chat_profiles async def chat_profile(): return [ cl.ChatProfile( name="None", markdown_description="None", ), cl.ChatProfile( name="neural-brain-AI", markdown_description="The main model of neural brain", ), cl.ChatProfile( name="Dorna-AI", markdown_description="One of the open-sourced models that neural brain team fine-tuned", ), # cl.ChatProfile( # name='Image-Generation', # markdown_description='Our image generation model, has a performance like midjourney', # ), cl.ChatProfile( name="gpt4-o-mini", markdown_description="The best state of the art openai model", ), cl.ChatProfile( name="GPT-4", markdown_description="OpenAI's GPT-4 model", ), cl.ChatProfile( name="gpt-3.5-turbo", markdown_description="OpenAI's GPT-3.5 Turbo model", ), # cl.ChatProfile( # name="GPT-3.5-turbo-0125", # markdown_description="OpenAI's GPT-3.5 Turbo 0125 model", # ), cl.ChatProfile( name="gpt-3.5-turbo-1106", markdown_description="OpenAI's GPT-3.5 Turbo 1106 model", ), # cl.ChatProfile( # name="davinci-002", # markdown_description="OpenAI's Davinci-002 model", # ), cl.ChatProfile( name="TTS", markdown_description="OpenAI's Text-to-Speech model", ), cl.ChatProfile( name="Qwen2-57B", markdown_description="Qwen second generation model with 57B parameters", ), cl.ChatProfile( name="Qwen2-7B", markdown_description="Qwen second generation model with 7B parameters", ), cl.ChatProfile( name="Qwen2-1.5B", markdown_description="Qwen second generation model with 1.5B parameters", ), cl.ChatProfile( name="Qwen2-0.5B", markdown_description="Qwen second generation model with 0.5B parameters", ), cl.ChatProfile( name="Qwen1.5-110B", markdown_description="Qwen first generation improved model with 110B parameters", ), cl.ChatProfile( name="Qwen1.5-72B", markdown_description="Qwen first generation improved model with 72B parameters", ), cl.ChatProfile( name="Qwen1.5-32B", markdown_description="Qwen first generation improved model with 32B parameters", ), cl.ChatProfile( name="Qwen1.5-2.7B", markdown_description="Qwen first generation improved model with 2.7B parameters", ), cl.ChatProfile( name="Qwen-72B", markdown_description="Qwen first generation model with 72B parameters", ), cl.ChatProfile( name="Qwen-14B", markdown_description="Qwen first generation model with 14B parameters", ), cl.ChatProfile( name="Qwen-7B", markdown_description="Qwen first generation model with 7B parameters", ), cl.ChatProfile( name="Llama-3.1-405B", markdown_description="Meta Open Source Model Llama with 405B parameters", ), cl.ChatProfile( name="Llama-3.1-70B", markdown_description="Meta Open Source Model Llama with 70B parameters", ), cl.ChatProfile( name="Llama-3.1-8B", markdown_description="Meta Open Source Model Llama with 8B parameters", ), cl.ChatProfile( name="Llama-3-70B", markdown_description="Meta Open Source model Llama-3 with 70B parameters", ), cl.ChatProfile( name='Aya-23B', markdown_description='Cohere open sourced AI model with 23B parameters' ), cl.ChatProfile( name="Llama-3-8B", markdown_description="Meta Open Source model Llama-2 with 7B parameters", ), cl.ChatProfile( name = "gemma2-9B", markdown_description = 'Google Generation 2 Open Source LLM with 9B parameters' ), cl.ChatProfile( name = "gemma-7B", markdown_description = 'Google Generation 1 Open Source LLM with 7B parameters' ), cl.ChatProfile( name="zephyr-7B", markdown_description="Open Source model Zephyr with 7B parameters", ), cl.ChatProfile( name='mistral-7B', markdown_description = 'mistral open source LLM with 7B parameters' ), # cl.ChatProfile( # name="Toka-353M", # markdown_description="PartAI Open Source model Toka with 353M parameters", # ) ] @cl.on_chat_start async def on_chat_start(): chat_profile = cl.user_session.get("chat_profile") if not chat_profile: await cl.Message( content='please choose a model to start' ).send() if chat_profile == 'neural-brain-AI': await cl.ChatSettings( [ Select( id="NB-Model", label="NeuralBrain - Models", values=["Neural Brain AI"], initial_index=0, ) ] ).send() await cl.Message( content="Hello, I am the main model of neural brain team, i am an instance of ChatGPT-4, This team finetuned me and i am ready to help you" ).send() if chat_profile == 'Dorna-AI': await cl.ChatSettings( [ Select( id="param_3", label="Parameter 3", values=["512"], # Only one selectable value initial_index=0, tooltip="Config parameter 3 (e.g., max tokens)", ), Select( id="param_4", label="Parameter 4", values=["0.7"], # Only one selectable value initial_index=0, tooltip="Config parameter 4 (e.g., temperature)", ), Select( id="param_5", label="Parameter 5", values=["0.95"], # Only one selectable value initial_index=0, tooltip="Config parameter 5 (e.g., top_p)", ), Select( id="api_name", label="API Name", values=["/chat"], initial_index=0, ), ] ).send() await cl.Message( content='my name is Dorna, Your AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!' ).send() if chat_profile == 'gpt4-o-mini': await cl.ChatSettings( [ Select( id="OpenAI-Model", label="OpenAI - Model", values=["gpt4-o-mini"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im one of the best models openai have released and i am configured by two iranian boys to help you." ).send() # if chat_profile == 'Image-Generation': # image = cl.Image(path='cat.png', name="result", display="inline") # await cl.Message( # content="I can make high quality & resoloution images for you, This is an example of what i can do!", # elements=[image], # ).send() if chat_profile == 'GPT-4': await cl.ChatSettings( [ Select( id="OpenAI-Model", label="OpenAI - Model", values=["gpt-4"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im OpenAI's latest and biggest model. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'gpt-3.5-turbo': await cl.ChatSettings( [ Select( id="OpenAI-Model", label="OpenAI - Model", values=["gpt-3.5-turbo"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() # if chat_profile == 'GPT-3.5-turbo-0125': # await cl.ChatSettings( # [ # Select( # id="OpenAI-Model", # label="OpenAI - Model", # values=["gpt-3.5-turbo-0125"], # initial_index=0, # ), # Slider( # id="Temperature", # label="Model Temperature", # initial=0.7, # min=0, # max=1, # step=0.1, # ), # ] # ).send() # await cl.Message( # content="Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " # ).send() if chat_profile == 'gpt-3.5-turbo-1106': await cl.ChatSettings( [ Select( id="OpenAI-Model", label="OpenAI - Model", values=["gpt-3.5-turbo-1106"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() # if chat_profile == 'davinci-002': # await cl.ChatSettings( # [ # Select( # id="OpenAI-Model", # label="OpenAI - Model", # values=["davinci-002"], # initial_index=0, # ), # Slider( # id="Temperature", # label="Model Temperature", # initial=0.7, # min=0, # max=1, # step=0.1, # ), # ] # ).send() # await cl.Message( # content="Im one of the OpenAI's models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " # ).send() if chat_profile == 'TTS': await cl.Message( content="Im TTS. of the best models OpenAI ever created. i can convert text to speech! . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'Qwen2-57B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen2-57B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens second generation second large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen2-7B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen2-7B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens second generation third large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen2-1.5B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen2-1.5B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens second generation small model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen2-0.5B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen2-0.5B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens second generation small model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen1.5-110B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen1.5-110B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens 1.5th generation Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen1.5-72B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen1.5-72B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens 1.5th generation second Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen1.5-32B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen1.5-32B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens 1.5th generation third Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen1.5-2.7B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen1.5-2.7B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens 1.5th generation small model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen-72B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen-72B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen-14B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen-14B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Qwen-7B': await cl.ChatSettings( [ Select( id="Qwen-Model", label="Qwen - Model", values=["Qwen-7B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!', ).send() if chat_profile == 'Llama-3.1-405B': await cl.ChatSettings( [ Select( id="Meta-Model", label="Meta - Model", values=["Llama-3.1-405B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im the big Llama-3.1!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'Llama-3.1-70B': await cl.ChatSettings( [ Select( id="Meta-Model", label="Meta - Model", values=["Llama-3.1-70B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im the second-big Llama-3.1!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'Llama-3.1-8B': await cl.ChatSettings( [ Select( id="Meta-Model", label="Meta - Model", values=["Llama-3.1-8B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im the small Llama-3.1!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'Llama-3-70B': await cl.ChatSettings( [ Select( id="Meta-Model", label="Meta - Model", values=["Llama-3-70B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im the big Llama-3!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'Llama-3-8B': await cl.ChatSettings( [ Select( id="Meta-Model", label="Meta - Model", values=["Llama-3-8B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im The small Llama!. one of the best open source models released by Meta! i am the small version of meta's open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'gemma2-9B': await cl.ChatSettings( [ Select( id="Google-Model", label="Google - Model", values=["Gemma-9B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im Gemma2. the 9B version of google second generation open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'gemma-7B': await cl.ChatSettings( [ Select( id="Google-Model", label="Google - Model", values=["Gemma-7B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im Gemma. the small version of google open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'zephyr-7B': await cl.ChatSettings( [ Select( id="zephyr-Model", label="zephyr - Model", values=["zephyr-7B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im Zephyr. One of the best open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() if chat_profile == 'mistral-7B': await cl.ChatSettings( [ Select( id="Mistral-Model", label="Mistral - Model", values=["Mistral-7B"], initial_index=0, ), Slider( id="Temperature", label="Model Temperature", initial=0.7, min=0, max=1, step=0.1, ), ] ).send() await cl.Message( content="Im Mistral. the small version of Mistral Family. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? " ).send() # if chat_profile == 'Toka-353M': # await cl.ChatSettings( # [ # Select( # id="PartAI-Model", # label="PartAI - Model", # values=["TokaBert-353M"], # initial_index=0, # ), # Slider( # id="Temperature", # label="Model Temperature", # initial=0.7, # min=0, # max=1, # step=0.1, # ), # ] # ).send() # await cl.Message( # content="Im Toka. An opens source persian LLM . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? you should ask me your questions like : the capital of england is " # ).send() @cl.on_message async def main(message: cl.Message): chat_profile = cl.user_session.get("chat_profile") if not chat_profile or chat_profile == 'None': await cl.Message( content="Please select a model first." ).send() return if chat_profile == 'neural-brain-AI': completion = openai_client.chat.completions.create( model="ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK", messages=[ {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, {"role": "user", "content": message.content} ] ) model_response = completion.choices[0].message.content await cl.Message( content=model_response ).send() elif chat_profile == "Dorna-AI": result = hf_text_client.predict( message=message.content, request="your name is Dorna,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!", param_3=512, param_4=0.7, param_5=0.95, api_name="/chat" ) model_response = result.strip("") await cl.Message( content=model_response ).send() elif chat_profile == "gpt4-o-mini": completion = openai_client.chat.completions.create( model="gpt-4o-mini", messages=[ {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, {"role": "user", "content": message.content} ] ) model_response = completion.choices[0].message.content await cl.Message( content=model_response ).send() # elif chat_profile == 'Image-Generation': # result = hf_image_client.predict( # prompt=message.content, # negative_prompt="", # seed=0, # randomize_seed=True, # width=512, # height=512, # guidance_scale=0, # num_inference_steps=2, # api_name="/infer" # ) # image = cl.Image(path=result, name="result", display="inline") # await cl.Message( # content="This message has an image!", # elements=[image], # ).send() elif chat_profile == 'GPT-4': completion = openai_client.chat.completions.create( model="gpt-4", messages=[ {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, {"role": "user", "content": message.content} ] ) model_response = completion.choices[0].message.content await cl.Message( content=model_response ).send() elif chat_profile == 'gpt-3.5-turbo': completion = openai_client.chat.completions.create( model="gpt-3.5-turbo", messages=[ {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, {"role": "user", "content": message.content} ] ) model_response = completion.choices[0].message.content await cl.Message( content=model_response ).send() elif chat_profile == 'GPT-3.5-turbo-0125': completion = openai_client.chat.completions.create( model="GPT-3.5-turbo-0125", messages=[ {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, {"role": "user", "content": message.content} ] ) model_response = completion.choices[0].message.content await cl.Message( content=model_response ).send() elif chat_profile == 'gpt-3.5-turbo-1106': completion = openai_client.chat.completions.create( model="gpt-3.5-turbo-1106", messages=[ {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, {"role": "user", "content": message.content} ] ) model_response = completion.choices[0].message.content await cl.Message( content=model_response ).send() # elif chat_profile == 'davinci-002': # completion = openai_client.chat.completions.create( # model="davinci-002", # messages=[ # {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"}, # {"role": "user", "content": message.content} # ] # ) # model_response = completion.choices[0].message.content # await cl.Message( # content=model_response # ).send() elif chat_profile == 'TTS': response = openai_client.audio.speech.create( model="tts-1", voice="alloy", input=message.content, ) response.stream_to_file("output.mp3") elements = [ cl.Audio(name="output.mp3", path="./output.mp3", display="inline"), ] await cl.Message( content="Here it is the response!", elements=elements, ).send() elif chat_profile == 'Qwen2-57B': client = Client("Qwen/Qwen2-57b-a14b-instruct-demo", hf_token=hf_token) result = client.predict( query=message.content, system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust", api_name="/model_chat" ) await cl.Message( content=result ).send() elif chat_profile == 'Llama-3.1-405B': client = InferenceClient( "meta-llama/Meta-Llama-3.1-405B-Instruct", token=f'{hf_token_llama_3_1}', ) for message in client.chat_completion( messages=[{"role": "user", "content": f'{message.content}'}], max_tokens=500, stream=True, ): complete_message += message.choiches[0].delta.content await cl.Message( content=complete_message, ).send() elif chat_profile == 'Llama-3.1-70B': completion = groq_client.chat.completions.create( model="llama-3.1-70b-versatile", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" # Iterate over each chunk for chunk in completion: # Retrieve the content from the current chunk content = chunk.choices[0].delta.content # Check if the content is not None before concatenating it if content is not None: complete_content += content # Send the concatenated content as a message await cl.Message(content=complete_content).send() elif chat_profile == 'Llama-3.1-8B': completion = groq_client.chat.completions.create( model="llama-3.1-8b-instant", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" # Iterate over each chunk for chunk in completion: # Retrieve the content from the current chunk content = chunk.choices[0].delta.content # Check if the content is not None before concatenating it if content is not None: complete_content += content # Send the concatenated content as a message await cl.Message(content=complete_content).send() elif chat_profile == 'Llama-3-70B': completion = groq_client.chat.completions.create( model="llama3-70b-8192", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" # Iterate over each chunk for chunk in completion: # Retrieve the content from the current chunk content = chunk.choices[0].delta.content # Check if the content is not None before concatenating it if content is not None: complete_content += content # Send the concatenated content as a message await cl.Message(content=complete_content).send() elif chat_profile == 'Llama-3-8B': completion = groq_client.chat.completions.create( model="llama3-8b-8192", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" # Iterate over each chunk for chunk in completion: # Retrieve the content from the current chunk content = chunk.choices[0].delta.content # Check if the content is not None before concatenating it if content is not None: complete_content += content # Send the concatenated content as a message await cl.Message(content=complete_content).send() elif chat_profile == 'gemma2-9B': completion = groq_client.chat.completions.create( model="gemma-9b-it", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" # Iterate over each chunk for chunk in completion: # Retrieve the content from the current chunk content = chunk.choices[0].delta.content # Check if the content is not None before concatenating it if content is not None: complete_content += content # Send the concatenated content as a message await cl.Message(content=complete_content).send() elif chat_profile == 'gemma-7B': completion = groq_client.chat.completions.create( model="gemma-7b-it", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" # Iterate over each chunk for chunk in completion: # Retrieve the content from the current chunk content = chunk.choices[0].delta.content # Check if the content is not None before concatenating it if content is not None: complete_content += content # Send the concatenated content as a message await cl.Message(content=complete_content).send() elif chat_profile == "zephyr-7B": result = hf_text_client.predict( message=message.content, request="your name is zephyr,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!", param_3=512, param_4=0.7, param_5=0.95, api_name="/chat" ) model_response = result.strip("") await cl.Message( content=model_response ).send() elif chat_profile == 'mistral-7B': completion = groq_client.chat.completions.create( model="mixtral-8x7b-32768", messages=[ { "role": "user", "content": message.content } ], temperature=1, max_tokens=1024, top_p=1, stream=True, stop=None, ) complete_content = "" for chunk in completion: content = chunk.choices[0].delta.content if content is not None: complete_content += content await cl.Message(content=complete_content).send() # elif chat_profile == 'Toka-353M': # output = query({ # "inputs": message.content, # }) # await cl.Message( # content=output[0]['sequence'] # ).send() elif chat_profile == 'Aya-23B': stream = co.chat_stream( model='c4ai-aya-23', message=message.content, temperature=0.3, # chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}], prompt_truncation='OFF', connectors=[], ) complete_content = '' for event in stream: if event.event_type == 'text-generation': complete_content += event.text await cl.Message(content=complete_content).send() @cl.on_settings_update async def setup_agent(settings): print("on_settings_update", settings)