Upload 2 files
- app.py +91 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,91 @@
+import gradio as gr
+import tempfile
+import pytest
+import io
+import sys
+import os
+import requests
+
+api_base = "https://api.endpoints.anyscale.com/v1"
+token = os.environ["OPENAI_API_KEY"]
+url = f"{api_base}/chat/completions"
+
+def generate_test(code):
+    s = requests.Session()
+    message = "Write me a test of this function\n{}".format(code)
+    system_prompt = """
+    You are a helpful coding assistant.
+    Your job is to help people write unit tests for their Python code.
+    If inputs and outputs are provided, please return a set of unit tests that will
+    verify that the function produces the correct outputs. Also provide tests to
+    handle base and edge cases.
+    """
+
+    body = {
+        "model": "meta-llama/Llama-2-70b-chat-hf",
+        "messages": [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": message},
+        ],
+        "temperature": 0.7,
+    }
+
+    with s.post(url, headers={"Authorization": f"Bearer {token}"}, json=body) as resp:
+        response = resp.json()["choices"][0]
+
+    if response["finish_reason"] != "stop":
+        raise ValueError("Please try again -- the response was not finished!")
+
+    split_response = response["message"]["content"].split("```")
+    if len(split_response) != 3:
+        raise ValueError("Please try again -- the response did not contain exactly one code block!")
+
+    # Return the generated test code (the text between the code fences).
+    return split_response[1]
+
+
+def execute_code(code, test):
+    # Capture the standard output in a StringIO object
+    old_stdout = sys.stdout
+    new_stdout = io.StringIO()
+    sys.stdout = new_stdout
+
+    # Write the function and its generated tests to one temporary file and run pytest on it
+    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.py') as f:
+        f.write(code + "\n")
+        f.write(test)
+        f.flush()
+        temp_path = f.name
+        pytest.main(["-x", temp_path])
+
+    # Restore the standard output
+    sys.stdout = old_stdout
+
+    # Get the captured output from the StringIO object
+    output = new_stdout.getvalue()
+    return output
+
+example = """
+def prime_factors(n):
+    i = 2
+    factors = []
+    while i * i <= n:
+        if n % i:
+            i += 1
+        else:
+            n //= i
+            factors.append(i)
+    if n > 1:
+        factors.append(n)
+    return factors
+"""
+
+with gr.Blocks() as demo:
+    gr.Markdown("<h1><center>Llama_test: generate unit tests for your Python code</center></h1>")
+    with gr.Row():
+        code_input = gr.Code(example, label="Provide the code of the function you want to test")
+        generate_btn = gr.Button("Generate test")
+    with gr.Row():
+        code_output = gr.Code()
+        code_output2 = gr.Code()
+
+    # Generate a test from the submitted code and show it in the first output panel.
+    generate_btn.click(generate_test, inputs=code_input, outputs=code_output)
+
+if __name__ == "__main__":
+    demo.launch()
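For context, the text generate_test returns (everything between the model's code fences) is expected to be an ordinary pytest module. The snippet below is illustrative only, not real model output; it shows the kind of test that execute_code would append after the bundled prime_factors example before invoking pytest.

# Illustrative shape of a generated test (not actual model output).
# execute_code() writes the function under test first, then text like this,
# into one temporary file and runs pytest on it.
def test_prime_factors_basic():
    assert prime_factors(12) == [2, 2, 3]
    assert prime_factors(13) == [13]

def test_prime_factors_edge_cases():
    assert prime_factors(1) == []
    assert prime_factors(2) == [2]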
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+gradio==3.40.1
+pytest==7.4.0
+Requests==2.31.0
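A small local smoke test is a quick way to exercise both functions outside the Gradio UI. This is a sketch only, not part of the commit: it assumes OPENAI_API_KEY is exported, the Anyscale endpoint is reachable, and the file (the smoke_test.py name is illustrative) sits next to app.py.

# smoke_test.py -- illustrative only, not part of this commit.
# Assumes OPENAI_API_KEY is set and app.py is in the same directory;
# importing app builds the Blocks UI but does not launch it.
from app import example, execute_code, generate_test

if __name__ == "__main__":
    # Ask the model to write tests for the bundled prime_factors example...
    test_code = generate_test(example)
    print("Generated test:\n", test_code)

    # ...then run the function and the generated tests together under pytest
    # and print the captured report.
    print("pytest output:\n", execute_code(example, test_code))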