saritha committed on
Commit 62afa46 • 1 Parent(s): f2d54d3

Update app.py

Files changed (1)
  1. app.py +36 -13
app.py CHANGED
@@ -25,9 +25,25 @@ def response_from_llam3(query):
 
     )
     return response.choices[0].message.content
-gr.Markdown("<h1>🚀 Ayurveda Mate 🦙</h1>")
-gr.Markdown("<h3> 🕹️ Type your questions or prompts below and see how each model responds to the same input 👾 </h3>")
+
+def response_from_mistral(query):
+    messages = [
+        {
+            "role": "system",
+            "content": "You are a helpful assistant who has plenty of knowledge on Ayurveda. If the message is Hi or any greeting, say namaste, how can I assist you."
+        },
+        {
+            "role": "user",
+            "content": "What is the answer to {}".format(query)
+        }
+    ]
 
+    response = client.chat.completions.create(
+        messages=messages,
+        model="mixtral-8x7b-32768"
+
+    )
+    return response.choices[0].message.content
 iface = gr.Interface(
 
     fn=response_from_llam3,
@@ -43,16 +59,23 @@ iface = gr.Interface(
 )
 iface.launch()
 
-# with gr.Blocks() as demo:
-#     gr.Markdown("<h1>🚀 Ayurveda Mate 🦙</h1>")
-#     gr.Markdown("<h3> 🕹️ Type your questions or prompts below and see how each model responds to the same input 👾 </h3>")
-#     with gr.Row():
-#         input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
-#         submit_button = gr.Button("Submit")
-#         output_llama = gr.Textbox(label="Llama 3 8B 👾", placeholder="", lines=10, interactive=False)
-#         # output_mistral = gr.Textbox(label="Mistral 7B 🌠", placeholder="", lines=10, interactive=False)
+def chat_with_models(text):
+    llama_response = response_from_llam3(text)
+    mistral_response = response_from_mistral(text)
+
+    return llama_response, mistral_response
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("<h1>🚀 Mistral 7B vs Llama 3 8B 🦙</h1>")
+    gr.Markdown("<h3> 🕹️ Compare the performance and responses of two powerful models, Mistral 7B and Llama 3 8B Instruct. Type your questions or prompts below and see how each model responds to the same input 👾 </h3>")
+    with gr.Row():
+        input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
+        submit_button = gr.Button("Submit")
+        output_llama = gr.Textbox(label="Llama 3 8B 👾", placeholder="", lines=10, interactive=False)
+        output_mistral = gr.Textbox(label="Mistral 7B 🌠", placeholder="", lines=10, interactive=False)
 
-# submit_button.click(fn=response_from_llam3, inputs="text", outputs="text")
+    submit_button.click(fn=chat_with_models, inputs=input_text, outputs=[output_llama, output_mistral])
 
-# if __name__ == "__main__":
-#     demo.launch()
+if __name__ == "__main__":
+    demo.launch()
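
response_from_llam3 and the new response_from_mistral differ only in the model id passed to client.chat.completions.create, so they could share a single helper. Below is a minimal sketch, assuming the app's client is the Groq SDK (suggested by the "mixtral-8x7b-32768" model id; the real client setup lives earlier in app.py and is not part of this diff), with ask_model as a purely illustrative name:

# Illustrative only, not part of this commit: one request helper parameterized by model id.
# The client setup below is a hypothetical stand-in for whatever app.py already does outside this diff.
import os
from groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"])

def ask_model(query, model_id):
    # Same system/user messages as response_from_mistral above, with the model made a parameter.
    messages = [
        {
            "role": "system",
            "content": "You are a helpful assistant who has plenty of knowledge on Ayurveda. "
                       "If the message is Hi or any greeting, say namaste, how can I assist you."
        },
        {"role": "user", "content": "What is the answer to {}".format(query)}
    ]
    response = client.chat.completions.create(messages=messages, model=model_id)
    return response.choices[0].message.content

# response_from_mistral(query) would then reduce to:
#     return ask_model(query, "mixtral-8x7b-32768")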