santhakumar committed on
Commit 3603925
1 Parent(s): bb71653

Added app.py

Files changed (1)
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
+ import gradio as gr
+ import requests
+
+ def generate_message(prompt, model):
+     url = 'https://api.together.xyz/inference'  # Together AI inference API endpoint
+     headers = {
+         'Authorization': 'Bearer ee16b2ea8440ef7142ac5e21f5d096ee2dfb9c0c206acff4818a35b91ae59cef',
+         'accept': 'application/json',
+         'content-type': 'application/json'
+     }
+     data = {
+         "model": model,
+         "prompt": prompt,
+         "max_tokens": 512,
+         "stop": [  # Stop sequences: Llama-2 chat control tokens
+             "</s>",
+             "[INST]",
+             "<<SYS>>",
+             "<</SYS>>",
+             "[/INST]",
+             "<s>"
+         ],
+         "temperature": 0.75,
+         "top_p": 0.2,
+         "top_k": 10
+     }
+     response = requests.post(url, headers=headers, json=data)
+     result = response.json()
+     return result['output']['choices'][0]['text'].strip()  # Extract the generated text from the response
+
+ def generate_message_with_submit(input_text, model):
+     return generate_message(input_text, model)
+
+ iface = gr.Interface(
+     fn=generate_message_with_submit,
+     inputs=[
+         gr.TextArea(lines=20, label="Enter Prompt"),
+         gr.Radio(['meta-llama/Llama-2-70b-chat-hf', 'iamplus/model1-70b'], label="Model", value='meta-llama/Llama-2-70b-chat-hf')
+     ],
+     outputs=gr.TextArea(label="AI Assistant Message"),
+     title="AI Assistant Message Generator",
+     description="Generate an appropriate message for an AI assistant to let the user know that it's working on a task and will get back soon.",
+     allow_flagging="never"  # Gradio expects "never"/"auto"/"manual" rather than a boolean
+ )
+
+ iface.launch(share=True)
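
Note on the parsing step: `result['output']['choices'][0]['text']` assumes a particular shape for the `/inference` response. The minimal sketch below only illustrates that assumed structure; the field layout is inferred from the indexing in app.py rather than from Together's documentation, and the sample text is made up.

# Hypothetical /inference response shape that generate_message expects;
# field names are inferred from the indexing in app.py, not from official docs.
example_response = {
    "output": {
        "choices": [
            {"text": "  I'm working on your request and will get back to you shortly.  "}
        ]
    }
}

# Mirrors the return line of generate_message.
message = example_response["output"]["choices"][0]["text"].strip()
print(message)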