Spaces: Runtime error
Danil committed
Commit • 4932cf0 • 1 Parent(s): 11a3f9d
- app.py +23 -28
- requirements.txt +2 -1
- server.py +1 -1
app.py
CHANGED
@@ -1,6 +1,6 @@
-import streamlit as st
 import requests
 import os
+import gradio as gr
 import json

 def start_server():
@@ -10,32 +10,27 @@ def start_server():
 if os.environ.get('SSTART') != "1":
     start_server()

-
-
-
+def completion(prompt,max_tokens,temperature,top_k,top_p):
+    req = f"http://0.0.0.0:8080?input_text={prompt}&top_p={top_p}&top_k={top_k}&temperature={temperature}&max_length={prompt}"
+    g = requests.get(req).json()
+    return g['text']
+
+demo = gr.Interface(
+    fn=completion,
+    inputs=[
+        gr.inputs.Textbox(lines=10,placeholder='Write some code..'),
+        gr.inputs.Slider(10,200,10,100,'Max Tokens',False),
+        gr.inputs.Slider(0,1.0,0.1,1.0,'temperature',False),
+        gr.inputs.Slider(0,50,1,40,'top_k',True),
+        gr.inputs.Slider(0,1.0,0.1,0.9,'top_p',True)
+    ],
+    outputs="text",
+    theme='dark-huggingface',
+    title='Solo-Coder',
+    description='Build by Ansh and ❤️',
+    allow_flagging=False,
+
 )

-
-
-txt = st.text_area('Write code here', '''import os
-
-def remove_file(file):''', height=400)
-
-gen = st.button('Generate')
-
-c = st.code('')
-
-max_length = st.slider('max_length', 1, 2048, 128)
-top_k = st.slider('top_k', 0, 100, 50)
-top_p = st.slider('top_p', 0.0, 1.0, 0.9)
-temperature = st.slider('temperature', 0.0, 1.0, 0.6)
-num_beams = st.slider('num_beams', 1, 100, 1)
-repetition_penalty = st.slider('repetition_penalty', 1.0, 10.0, 1.0)
-
-
-if gen:
-    c.code('Generating...')
-    req = f"http://0.0.0.0:8080?input_text={txt}&top_p={top_p}&top_k={top_k}&temperature={temperature}&num_beams={num_beams}&repetition_penalty={repetition_penalty}&max_length={max_length}"
-    res = requests.get(req)
-    print('ok')
-    c.code(json.loads(res.text))
+if __name__ == "__main__":
+    demo.launch()
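Note on the new app.py: the committed completion() interpolates the raw prompt straight into the query string, and the same {prompt} value is also interpolated into max_length. The following is a minimal, hypothetical sketch (not part of the commit) of how the same call could be made with requests query params, so newlines and special characters in the prompt are URL-encoded and the Max Tokens slider drives max_length; it assumes the server reads the query parameters shown in server.py and returns the {"text": ...} payload added there.

# Sketch only, not part of this commit: an alternative way for the Gradio
# callback to call the FastAPI server. Assumes the server accepts the query
# parameters shown in server.py (input_text, max_length, top_p, top_k,
# num_beams, temperature, repetition_penalty) and returns {"text": ...}.
import requests

SERVER_URL = "http://0.0.0.0:8080"  # same address the committed code targets

def completion_sketch(prompt, max_tokens, temperature, top_k, top_p):
    params = {
        "input_text": prompt,           # requests URL-encodes newlines and special characters
        "max_length": int(max_tokens),  # assumption: the slider value, not the prompt, sets max_length
        "temperature": temperature,
        "top_k": int(top_k),
        "top_p": top_p,
        "num_beams": 1,                 # server.py also expects these two parameters
        "repetition_penalty": 1.0,
    }
    resp = requests.get(SERVER_URL, params=params, timeout=120)
    resp.raise_for_status()
    return resp.json()["text"]          # matches the {"text": res} payload from server.py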
requirements.txt
CHANGED
@@ -2,4 +2,5 @@ transformers
 fastapi
 uvicorn
 torch
-requests
+requests
+gradio
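Note on requirements.txt: the new gradio entry is unpinned, while the rewritten app.py uses the older gr.inputs.* input classes, the theme= keyword, and allow_flagging=False. A hypothetical pinned variant (version number is an assumption, not from the commit) could look like:

transformers
fastapi
uvicorn
torch
requests
gradio==2.9.4  # hypothetical pin to a Gradio 2.x release, where the gr.inputs.* API used in app.py is still available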
server.py
CHANGED
@@ -14,4 +14,4 @@ def read_root(input_text, max_length, top_p, top_k, num_beams, temperature, repetition_penalty):
     inpt = tokenizer.encode(input_text, return_tensors="pt")
     out = model.generate(inpt, max_length=int(max_length), top_p=float(top_p), top_k=float(top_k), temperature=float(temperature), num_beams=int(num_beams), repetition_penalty=float(repetition_penalty))
     res = tokenizer.decode(out[0])
-    return {res}
+    return {"text": res}
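Note on server.py: the old endpoint returned {res}, a one-element Python set rather than the JSON object the new app.py reads via ['text']; the hunk changes it to a dict. Below is a minimal sketch of the surrounding endpoint for context; only the four lines in the hunk come from the commit, while the FastAPI route, app object, and model/tokenizer setup are assumptions for illustration.

# Sketch only: plausible context around the server.py hunk. The route path,
# app object, and checkpoint name are assumptions; only read_root's body
# lines appear in the actual diff.
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()
MODEL_ID = "gpt2"  # placeholder checkpoint; the Space's real model is not shown in this diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

@app.get("/")
def read_root(input_text, max_length, top_p, top_k, num_beams, temperature, repetition_penalty):
    inpt = tokenizer.encode(input_text, return_tensors="pt")
    out = model.generate(
        inpt,
        max_length=int(max_length),
        top_p=float(top_p),
        top_k=int(float(top_k)),  # generate() expects an int here; the committed line casts to float
        temperature=float(temperature),
        num_beams=int(num_beams),
        repetition_penalty=float(repetition_penalty),
    )
    res = tokenizer.decode(out[0])
    # Returning a dict (instead of the old set literal {res}) yields JSON that
    # the Gradio client can read as response.json()['text'].
    return {"text": res}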