zamal committed on
Commit
3121096
1 Parent(s): 37a32e9

Upload myspace.py

Browse files
Files changed (1) hide show
  1. myspace.py +78 -0
myspace.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import json
5
+
6
# Read the Hugging Face API token from the environment (e.g. a Space secret).
hf_token = os.getenv("HF_TOKEN")
8
+
9
def query_llama(text):
    """Generate a completion for *text* from Meta-Llama-3-8B-Instruct.

    Calls the Hugging Face serverless inference API using the module-level
    ``hf_token``. Never raises: every failure mode is converted into an
    ``"Error: ..."`` string so the Gradio UI can display it directly.
    """
    API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
    headers = {"Authorization": f"Bearer {hf_token}"}  # token loaded from the environment at import
    payload = {"inputs": text}

    try:
        # timeout keeps the UI from hanging indefinitely on a stalled request
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()  # raises for 4XX/5XX errors
        data = response.json()
        print("Llama Response Data:", json.dumps(data, indent=2))  # debug: formatted JSON response

        # On success the API returns a list whose first element is a dict.
        if isinstance(data, list) and len(data) > 0 and isinstance(data[0], dict):
            return data[0].get('generated_text', "Error: 'generated_text' key not found in response")
        return "Error: Invalid response format"
    except requests.RequestException as e:
        print("HTTP Error:", e)
        return "Error: HTTP request failed"
    except ValueError:
        # response.json() raised because the body was not valid JSON.
        # (The original `except KeyError` branch was unreachable: dict.get never raises.)
        print("Decode Error: response body was not JSON")
        print("Response content:", response.text)
        return "Error: Invalid response format"
32
+
33
+
34
def query_mistral(text):
    """Generate a completion for *text* from Mistral-7B-Instruct-v0.2.

    Calls the Hugging Face serverless inference API using the module-level
    ``hf_token``. Never raises: every failure mode is converted into an
    ``"Error: ..."`` string so the Gradio UI can display it directly.
    """
    API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
    headers = {"Authorization": f"Bearer {hf_token}"}  # token loaded from the environment at import
    payload = {"inputs": text}

    try:
        # timeout keeps the UI from hanging indefinitely on a stalled request
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()  # raises for 4XX/5XX errors
        data = response.json()
        print("Mistral Response Data:", json.dumps(data, indent=2))  # debug: formatted JSON response

        # On success the API returns a list whose first element is a dict.
        if isinstance(data, list) and len(data) > 0 and isinstance(data[0], dict):
            return data[0].get('generated_text', "Error: 'generated_text' key not found in response")
        return "Error: Invalid response format"
    except requests.RequestException as e:
        print("HTTP Error:", e)
        return "Error: HTTP request failed"
    except ValueError:
        # response.json() raised because the body was not valid JSON.
        # (The original `except KeyError` branch was unreachable: dict.get never raises.)
        print("Decode Error: response body was not JSON")
        print("Response content:", response.text)
        return "Error: Invalid response format"
57
+
58
+
59
def chat_with_models(text):
    """Send the same prompt to both models and return (llama_reply, mistral_reply)."""
    return query_llama(text), query_mistral(text)
64
+
65
+
66
# --- Gradio UI: one shared prompt box fanned out to both models ---
with gr.Blocks() as demo:
    gr.Markdown("<h1>🚀 Mistral 7B vs LLama3 8B 🦙</h1>")
    gr.Markdown("<h3> 🕹️ Compare the performance and responses of two powerful models, Mistral 7B and LLama3 8B instruct. Type your questions or prompts below and see how each model responds to the same input 👾 </h3>")
    # NOTE(review): this block was recovered from a diff that lost indentation;
    # it is ambiguous which widgets sit inside the Row — confirm against the live app.
    with gr.Row():
        input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
    submit_button = gr.Button("Submit")
    output_llama = gr.Textbox(label="Llama 3 8B 👾", placeholder="", lines=10, interactive=False)
    output_mistral = gr.Textbox(label="Mistral 7B 🌠", placeholder="", lines=10, interactive=False)

    # One click dispatches the prompt to both models and fills both output boxes.
    submit_button.click(fn=chat_with_models, inputs=input_text, outputs=[output_llama, output_mistral])

# Start the app only when run directly as a script.
if __name__ == "__main__":
    demo.launch()