Update app.py
app.py CHANGED
@@ -25,9 +25,25 @@ def response_from_llam3(query):
 
     )
     return response.choices[0].message.content
-
-
+
+def response_from_mistral(query):
+    messages = [
+        {
+            "role": "system",
+            "content": "You are a helpful assistant with plenty of knowledge of Ayurveda. If the message is 'Hi' or any other greeting, say 'Namaste, how can I assist you?'"
+        },
+        {
+            "role": "user",
+            "content": "What is the answer to {}".format(query)
+        }
+    ]
 
+    response = client.chat.completions.create(
+        messages=messages,
+        model="mixtral-8x7b-32768"
+
+    )
+    return response.choices[0].message.content
 iface = gr.Interface(
 
     fn=response_from_llam3,
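The new response_from_mistral function reuses a client object that this diff never defines; given the "mixtral-8x7b-32768" model ID, which Groq serves, the client is presumably a Groq SDK client created near the top of app.py. A minimal sketch of that setup, assuming the groq Python package and a GROQ_API_KEY environment variable (both assumptions, not shown in this commit):

import os
from groq import Groq

# Hypothetical setup: the real initialization lives outside this diff.
# "mixtral-8x7b-32768" is a Groq-hosted model, so a Groq client is assumed.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

With that in place, client.chat.completions.create(messages=..., model=...) matches the call made by both response functions.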
@@ -43,16 +59,23 @@ iface = gr.Interface(
 )
 iface.launch()
 
-
-
-
-
-
-
-
-
+def chat_with_models(text):
+    llama_response = response_from_llam3(text)
+    mistral_response = response_from_mistral(text)
+
+    return llama_response, mistral_response
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("<h1>🚀 Mistral 7B vs Llama 3 8B 🦙</h1>")
+    gr.Markdown("<h3> 🕹️ Compare the performance and responses of two powerful models, Mistral 7B and Llama 3 8B Instruct. Type your questions or prompts below and see how each model responds to the same input 👾 </h3>")
+    with gr.Row():
+        input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
+        submit_button = gr.Button("Submit")
+        output_llama = gr.Textbox(label="Llama 3 8B 👾", placeholder="", lines=10, interactive=False)
+        output_mistral = gr.Textbox(label="Mistral 7B 🌀", placeholder="", lines=10, interactive=False)
 
-
+    submit_button.click(fn=chat_with_models, inputs=input_text, outputs=[output_llama, output_mistral])
 
-
-
+if __name__ == "__main__":
+    demo.launch()
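As committed, chat_with_models calls the two models sequentially, so the user waits for both round trips back to back. Since the two requests are independent, they could be issued concurrently; a sketch using Python's standard concurrent.futures (an alternative, not part of this commit):

from concurrent.futures import ThreadPoolExecutor

def chat_with_models_concurrent(text):
    # Fan out both independent API calls, then gather the results.
    with ThreadPoolExecutor(max_workers=2) as pool:
        llama_future = pool.submit(response_from_llam3, text)
        mistral_future = pool.submit(response_from_mistral, text)
        return llama_future.result(), mistral_future.result()

Wiring this into the existing submit_button.click(...) only requires swapping the fn= argument.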