Spaces:
Runtime error
Runtime error
mcguinnessa
commited on
Commit
•
362b9f8
1
Parent(s):
191ebaf
Upload folder using huggingface_hub
Browse files- .gitignore +12 -0
- README.md +5 -9
- Test_Case_definition.txt +9 -0
- format.py +18 -0
- genai_web.py +454 -0
- json_format.py +12 -0
- requirements.txt +3 -0
.gitignore
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.workspaces
|
2 |
+
|
3 |
+
*.swp
|
4 |
+
*.tst
|
5 |
+
|
6 |
+
|
7 |
+
# Eclipse m2e generated files
|
8 |
+
# Eclipse Core
|
9 |
+
.project
|
10 |
+
# JDT-specific (Eclipse Java Development Tools)
|
11 |
+
.classpath
|
12 |
+
|
README.md
CHANGED
@@ -1,12 +1,8 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: purple
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 4.
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
11 |
-
|
12 |
-
|
|
|
1 |
---
|
2 |
+
title: genAIWeb
|
3 |
+
app_file: genai_web.py
|
|
|
|
|
4 |
sdk: gradio
|
5 |
+
sdk_version: 4.26.0
|
|
|
|
|
6 |
---
|
7 |
+
# genAIWeb
|
8 |
+
GenAI Web interface
|
Test_Case_definition.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
A Test Case is defined as having the following fields:
|
2 |
+
|
3 |
+
Number, abbreviated to No., and is the number of the test case in the table or document.
|
4 |
+
Heading. A useful short description of the test case.
|
5 |
+
Description. A summary of the test case.
|
6 |
+
ID. An alphanumeric identifier, unique to the test and derived from the element under test and the number field.
|
7 |
+
Prerequisites. Describes the preconditions needed for the tests to be executed.
|
8 |
+
Test Steps. This is a series of at least 3 steps that clearly describes how to execute the test case. Each step shall be numbered.
|
9 |
+
Expected Results. This describes the expected outcomes for each of the steps itemised in Test Steps.
|
format.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from abc import ABC, abstractmethod
|
3 |
+
|
4 |
+
class Format(ABC):
|
5 |
+
# def __init__(self, param2):
|
6 |
+
# self.param1 = param1
|
7 |
+
# self.param2 = param2
|
8 |
+
|
9 |
+
self.json = None
|
10 |
+
self.html = None
|
11 |
+
self.text = None
|
12 |
+
|
13 |
+
|
14 |
+
@abstractmethod
|
15 |
+
def asHTML(self):
|
16 |
+
# Method implementation here
|
17 |
+
pass
|
18 |
+
|
genai_web.py
ADDED
@@ -0,0 +1,454 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/python3
|
2 |
+
|
3 |
+
import websocket
|
4 |
+
import json
|
5 |
+
from contextlib import closing
|
6 |
+
from dataclasses import dataclass
|
7 |
+
from uuid import uuid4
|
8 |
+
import re
|
9 |
+
import time
|
10 |
+
import datetime
|
11 |
+
|
12 |
+
#from langchain import PromptTemplate
|
13 |
+
from langchain_core.prompts import PromptTemplate
|
14 |
+
|
15 |
+
import gradio as gr
|
16 |
+
import os
|
17 |
+
|
18 |
+
# --- Deployment configuration (read from the environment at import time) ---
WORKSPACE_ID = os.environ['WORKSPACE_ID']   # chatbot workspace to query; KeyError if unset
SOCKET_URL = "wss://datw9crxl8.execute-api.us-east-1.amazonaws.com/socket/"   # playground websocket endpoint
API_TOKEN = os.environ['API_KEY']           # x-api-key header for the websocket; KeyError if unset

# Maximum number of test cases requested from the model in a single query;
# larger requests are split across several calls (see generate_tests).
TESTS_PER_CALL = 10

# Output formats offered in the UI dropdown.
FORMAT_OPTIONS = ["HTML", "CSV", "Excel", "JSON", "Text"]

#g_tests_generated = False

#MODEL = "mistral.mixtral-8x7b-instruct-v0:1"
#MODEL = "ai21.j2-ultra-v1"
#MODEL = "anthropic.claude-v2:1"
#MODEL = "amazon.titan-tg1-large"
#MODEL = "anthropic.claude-3-sonnet-20240229-v1:0"
#MODEL = "anthropic.claude-3-haiku-20240307-v1:0"
#MODEL = "meta.llama2-70b-chat-v1"
#MODEL = "amazon.titan-tg1-large"


# Model name -> default "Max Tokens" value applied when that model is
# selected in the UI (see change_max_token_default).
model_dict = {"mistral.mixtral-8x7b-instruct-v0:1" : 4096,
              "mistral.mistral-large-2402-v1:0" : 4096,
#              "mistral.mistral-7b-instruct-v0:2" : 4096,
              "meta.llama2-70b-chat-v1" : 2048,
              "meta.llama3-70b-instruct-v1:0" : 2048,
              "meta.llama3-8b-instruct-v1:0" : 2048,
              "ai21.j2-ultra-v1" : 4096,
#              "anthropic.claude-3-sonnet-20240229-v1:0" : 4096,
#              "anthropic.claude-3-haiku-20240307-v1:0" : 4096,
              "amazon.titan-tg1-large" : 4096 }



# Column headings used both in the prompt sent to the model and in the
# generated output tables.
HEADING_NO = "No."
HEADING_NAME = "Test Name"
HEADING_DESC = "Description"
HEADING_ID = "External Test ID"
HEADING_PRE = "Pre-Conditions"
HEADING_STEPS = "Test Steps"
HEADING_RESULTS = "Expected Results"
|
58 |
+
|
59 |
+
|
60 |
+
def validate_element(input):
    """Return True when *input* is non-empty and shorter than 32 characters."""
    return 0 < len(input) < 32
|
67 |
+
|
68 |
+
def validate_service(input):
    """Return True when *input* is shorter than 32 characters (empty is allowed)."""
    return len(input) < 32
|
74 |
+
|
75 |
+
|
76 |
+
|
77 |
+
|
78 |
+
############################################################
|
79 |
+
#
|
80 |
+
# Generates the tests
|
81 |
+
#
|
82 |
+
############################################################
|
83 |
+
############################################################
#
# Generates the tests
#
############################################################
def generate_tests(model, element, service, format, temperature, topp, max_tokens, num_tests,
                   role, type):
    """Build a prompt from the UI inputs, query the model in batches of
    TESTS_PER_CALL, and return the values for the Gradio output widgets:
    [html_box, json_box, text_box, results column, download button]."""

    session_id = uuid4()   # fresh chat session for each generation run

    service = str(service)
    element = str(element)
    print("Element:" + element)
    print("Service(len):" + service + ":" + str(len(service)))
    print("Format:" + format)

    #rc = ["", "{}", "", gr.Button("Download", visible=True) ]
    # One slot per output component wired in gen_btn.click():
    # [html_box, json_box, text_box, col1, download_btn]
    rc = ["", "{}", "", gr.Column(visible=True), None ]

    # NOTE(review): on validation failure a bare string is returned instead of
    # the 5-element list the click handler's outputs expect -- verify this is
    # the intended error behaviour for Gradio.
    if not validate_element(element):
        return "Invalid Element input"

    if not validate_service(service):
        return "Invalid Service input"

    # First batch: ask for at most TESTS_PER_CALL tests.
    if num_tests >= TESTS_PER_CALL:
        num_tests_to_ask_for = TESTS_PER_CALL
    else:
        num_tests_to_ask_for = num_tests

    secondary_target = ""

    if 0 < len(service):
        # NOTE(review): "including the for the" reads garbled; presumably meant
        # "including those for the service ..." -- confirm prompt intent.
        secondary_target = f" including the for the service {service}."

    formatting_prefix = ""
    formatting_suffix = ""
    format_separator = ""

    formatting = ""
    if format == "HTML":
        formatting = f"""Each test cases must be presented as row which can be added to a HTML table. Each row will be prefixed with <tr> & suffixed with </tr>. This table must easy to read. Do not include the tag <table>, do not generate the header row or use the <th> tags.
Here is an example of the desired output format <tr><td>{HEADING_NO}</td><td>{HEADING_NAME}</td><td>{HEADING_DESC}</td><td>{HEADING_ID}</td><td>{HEADING_PRE}</td><td>{HEADING_STEPS}</td><td>{HEADING_RESULTS}</td></tr>"""

        # The model only emits <tr> rows; wrap them in a table with a header row.
        formatting_prefix = f"<table><tr><th>{HEADING_NO}</th><th>{HEADING_NAME}</th><th>{HEADING_DESC}</th><th>{HEADING_ID}</th><th>{HEADING_PRE}</th><th>{HEADING_STEPS}</th><th>{HEADING_RESULTS}</th></tr>"
        formatting_suffix = "</table>"

        display_idx = 0   # rc[0] -> html_box

    elif format == "JSON":
        formatting = f"""The output must use strict JSON format. Each Test case will be a JSON object. Each of the fields will be property in the JSON object. Do not generate the enclosing "[" or "]" of the top level list.
Here is an example of the desire output format:
{{ "{HEADING_NO}" : "Value", "{HEADING_NAME}": "Value", "{HEADING_DESC}": "Value", "{HEADING_ID}": "Value", "{HEADING_PRE}": "Value", "{HEADING_STEPS}": "Value", "{HEADING_RESULTS}": "Value" }}"""

        # The model emits bare objects; wrap them in [] and join batches with ",".
        formatting_prefix = f"["
        formatting_suffix = "]"
        format_separator = ","

        display_idx = 1   # rc[1] -> json_box
    elif format == "CSV":
        formatting = "The resultant test cases must be presented in the format of a CSV table"
        display_idx = 2   # rc[2] -> text_box
    elif format == "Excel":
        formatting = "The resultant test cases must be presented in the format of a that can be easily pasted into a spreadsheet such as Excel"
        display_idx = 2
    elif format == "Text":
        formatting = "The resultant test cases must be presented in the format of a table in plain text"
        display_idx = 2
    # NOTE(review): display_idx is unbound if format is not one of the above;
    # safe only because the UI dropdown is restricted to FORMAT_OPTIONS.

    #prompt = f"""You are a {role}. Generate {num_tests_to_ask_for} unique test cases for {element} including the for the service {service}.
    prompt = f"""You are a {role}. Generate {num_tests_to_ask_for} unique test cases for {element}{secondary_target}.

The test cases must be {type} test cases.

{formatting}
Do not generate any superfluous output that is not part of test case.

The test cases must contain the fields in the following order: {HEADING_NO}, {HEADING_NAME}, {HEADING_DESC}, {HEADING_ID}, {HEADING_PRE}, {HEADING_STEPS}, and {HEADING_RESULTS} as specified by the Test Case Definition.

"""

    # The test cases must conform to the definition specified in your knowledge base.
    # The test cases must conform to the definition specified.
    # The test cases must conform to the definition of a Test Case in your understanding.
    # The test cases must conform to your understanding of the definition of a test case.
    # The test cases must conform to the definition of a test case provided.
    # The test cases must conform to the definition specified in 'Test Definition.txt'
    # The test cases must include the following elements, each corresponding to a heading: {HEADING_NO}, {HEADING_NAME}, {HEADING_DESC}, {HEADING_ID}, {HEADING_PRE}, {HEADING_STEPS}, and {HEADING_RESULTS}.
    #
    # {HEADING_NO}' is an abbreviation for number. This is a unique integer for each test case, starting from 1 and incrementing by 1 for each test case.
    # {HEADING_NAME} is a useful short description of the test.
    # {HEADING_DESC} is a summary of the test and should end in -{element}.
    # {HEADING_ID} is an alpha-numeric ID, unique for each case and derived from {element}.
    # {HEADING_PRE} describes the preconditions needed for the tests to be executed.
    # {HEADING_STEPS} is a series of at least 3 steps that clearly describes how to execute the test case. Each step must be numbered.
    # {HEADING_RESULTS} describes the expected outcome for each step itemised in, each outcome must be numbered {HEADING_STEPS}.

    #output = ""
    # Query in batches; follow-up prompts reuse the same session so the model
    # continues the numbering from the previous batch.
    output_array = []
    tests_remaining = num_tests
    while tests_remaining > 0:
        query_output = send_query(model, prompt, session_id, temperature, topp, max_tokens)
        output_array.append(enforce_format(query_output, format))

        tests_remaining -= num_tests_to_ask_for
        if tests_remaining >= TESTS_PER_CALL:
            num_tests_to_ask_for = TESTS_PER_CALL
        else:
            num_tests_to_ask_for = tests_remaining
        prompt = f"Generate another {num_tests_to_ask_for} unique test cases using the same requirements in the same output format. Ensure the numbering is continuous"

    output = format_separator.join(output_array)

    # Place the assembled output in the widget slot chosen above.
    rc[display_idx] = f"{formatting_prefix}{output}{formatting_suffix}"
    print("TOTAL OUT:" + str(rc[display_idx]))



    # Timestamped download file name, e.g. gentests-20240101-1200.tst
    current_time = datetime.datetime.now()
    filename = "gentests-" + current_time.strftime("%Y%m%d-%H%M") + ".tst"


    #filename = "genai.tst"
    # Write the file eagerly so the DownloadButton can serve it immediately.
    download_to_file(rc[0], rc[1], rc[2], format, filename)
    rc[4] = gr.DownloadButton(value=filename)

    # #download_btn.click(fn=download_to_file,
    #                     inputs=[html_box, json_box, text_box, format_file, format_gen],
    #                     outputs=downloaded_md)

    return rc
|
209 |
+
|
210 |
+
############################################################
|
211 |
+
#
|
212 |
+
# Sends the query to the playground
|
213 |
+
#
|
214 |
+
############################################################
|
215 |
+
############################################################
#
# Sends the query to the playground
#
############################################################
def send_query(model, prompt, session_id, temperature, topp, max_tokens):
    """Send one prompt over the module-level websocket *ws* and block until a
    "final_response" message arrives; return the stripped response text."""

    print("Model         :" + str(model))
    print("Max Tokens    :" + str(max_tokens))
    print("Session ID IN :" + str(session_id))
    print("Temperature   :" + str(temperature))
    print("TopP          :" + str(topp))
    print("Prompt        :" + str(prompt))

    # Request envelope expected by the playground's websocket API.
    data = {
        "action": "run",
        "modelInterface": "langchain",
        "data": {
            "mode": "chain",
            "text": prompt,
            "files": [],
            "modelName": model,
            "provider": "bedrock",
            "sessionId": str(session_id),
            "workspaceId": WORKSPACE_ID,
            "modelKwargs": {
                "streaming": False,
                "maxTokens": max_tokens,
                "temperature": temperature,
                "topP": topp
            }
        }
    }
    ws.send(json.dumps(data))

    # Drain intermediate messages until the final response arrives.
    r1 = None
    s1 = None
    while r1 is None:
        m1 = ws.recv()
        j1 = json.loads(m1)
        #print("J1:" + str(j1))
        a1 = j1.get("action")
        #print("A1:" + str(a1))
        if "final_response" == a1:
            r1 = j1.get("data", {}).get("content")
            s1 = j1.get("data", {}).get("sessionId")
            print("Response: " + str(r1))
        if "error" == a1:
            # NOTE(review): an "error" message is only printed and the loop
            # keeps waiting; if no final_response ever follows, this blocks
            # forever. Consider raising or breaking here.
            print("M1:" + str(m1))

    print("Session ID OUT:" + str(s1))

    # NOTE(review): r1 is whatever "content" held -- if the server sends
    # final_response without content this would be None... but the while
    # condition guarantees r1 is not None here.
    return r1.strip()
|
263 |
+
|
264 |
+
|
265 |
+
##################################################################################
|
266 |
+
#
|
267 |
+
# Enforce Format Helper
|
268 |
+
#
|
269 |
+
##################################################################################
|
270 |
+
def strip_leading_and_trailing(text_block, start_str, end_str):
    """Return the slice of *text_block* from the first *start_str* through the
    last *end_str*.

    If *start_str* is absent the result is the empty string; if *end_str* is
    absent the text is kept from *start_str* to the end unchanged.
    """
    result = ""

    start = text_block.find(start_str)
    print("leading idx=" + str(start))
    if start >= 0:
        result = text_block[start:]
        #print("Output:" + str(result))

    end = result.rfind(end_str)
    print("trailing idx=" + str(end))
    if end >= 0:
        result = result[:end + len(end_str)]

    print("Stripped Output:" + str(result))

    return result
|
286 |
+
|
287 |
+
##################################################################################
|
288 |
+
#
|
289 |
+
# Enforce Format
|
290 |
+
#
|
291 |
+
##################################################################################
|
292 |
+
def enforce_format(text_block, format):
    """Trim any chatter the model added around the payload, for the formats
    with unambiguous delimiters (HTML rows, JSON objects); all other formats
    are passed through untouched."""

    print("Format=" + format)
    delimiters = {"HTML": ("<tr>", "</tr>"), "JSON": ("{", "}")}
    bounds = delimiters.get(format)
    if bounds is not None:
        return strip_leading_and_trailing(text_block, bounds[0], bounds[1])
    return text_block
|
301 |
+
|
302 |
+
##################################################################################
|
303 |
+
#
|
304 |
+
# Method that changes the default value of Max Tokens based on the model selected
|
305 |
+
#
|
306 |
+
##################################################################################
|
307 |
+
def change_max_token_default(model_name):
    """Return a replacement Max Tokens widget preset to the selected model's
    default token limit (looked up in model_dict)."""
    default_tokens = model_dict[model_name]
    return gr.Number(value=default_tokens, label="Max Tokens", scale=1)
|
310 |
+
|
311 |
+
|
312 |
+
##################################################################################
|
313 |
+
#
|
314 |
+
# Downloads the output to a file
|
315 |
+
#
|
316 |
+
##################################################################################
|
317 |
+
#def download_to_file(html, json_data, text, format_in, format_out):
|
318 |
+
def download_to_file(html, json_data, text, format_in, filename):
    """Write the generated tests to *filename* in the requested format.

    Exactly one payload is written: *html* for "HTML", *json_data* for
    "JSON", and *text* for every other format.
    """

    #file_path = "genai.tst"
    with open(filename, "w") as output_file:
        if format_in == "HTML":
            output_file.write(str(html))
        # BUG FIX: this was a second independent `if`, so its `else` fired for
        # "HTML" too -- the HTML payload was written and then the text payload
        # appended. An elif chain writes exactly one payload.
        elif format_in == "JSON":
            json.dump(json_data, output_file, indent=3)
        else:
            output_file.write(str(text))


    #text_str = str(format_in) + " tests written to file, " + file_path + " as " + str(format_out)
    text_str = str(format_in) + " tests written to file, " + filename
    print(text_str)
    #return gr.Markdown(visible=True, value=text_str)
    #return file_path
    # Brief pause so the file is settled before Gradio serves the download.
    time.sleep(1)
    return
|
337 |
+
|
338 |
+
|
339 |
+
#########################################################################################################
|
340 |
+
#
|
341 |
+
# MAIN
|
342 |
+
#
|
343 |
+
#########################################################################################################
|
344 |
+
#########################################################################################################
#
# MAIN
#
#########################################################################################################
if __name__ == "__main__":

    # NOTE(review): `global` at module level is a no-op and tests_generated is
    # never assigned or read afterwards -- likely leftover.
    global tests_generated

    theme = gr.themes.Glass(primary_hue=gr.themes.colors.blue,
                            secondary_hue=gr.themes.colors.cyan)
    #theme = gr.themes.Default()
    #theme = gr.themes.Base()
    #theme = gr.themes.Soft()
    #theme = gr.themes.Monochrome()

    # Default values pre-filled in the two target text boxes.
    #prompt_element_template = """element"""
    prompt_element_template = """HSS"""
    #prompt_subsystem_template = ""
    prompt_subsystem_template = "Backup And Restore"

    # Open the websocket once for the lifetime of the app; send_query reuses
    # the module-level `ws`.
    url = SOCKET_URL
    ws = websocket.create_connection(url, header={"x-api-key": API_TOKEN})

    # Session ID
    #session_id = uuid4()

    output_str = ""
    with gr.Blocks(theme=theme) as demo:

        generated_state = gr.State(False)
        #gr.Label("Generate Tests for")
        with gr.Row() as row1:
            element = gr.Textbox(label="Generate tests for ", value=prompt_element_template, scale=2)
            subsystem = gr.Textbox(label="Service ", value=prompt_subsystem_template, scale=1)

        with gr.Row() as row2:
            num_tests = gr.Number(value=10, label="Number")
            format_gen = gr.Dropdown(choices=FORMAT_OPTIONS,
                                     label="Format",
                                     value="HTML")

            role = gr.Dropdown(
                choices=["Tester", "Software Engineer", "Customer", "Analyst"],
                value="Tester",
                label="Role")
            type = gr.Dropdown(choices=[
                "Sunny Day", "Rainy Day", "Functional", "High Availability",
                "Resilience", "Acceptance"
            ],
                value="Functional",
                label="Test Type")

        with gr.Row() as row3:
            default_max_tokens = 2048
            # NOTE(review): choices is passed a dict_keys view; a list() would
            # be safer across gradio versions -- confirm before changing.
            model = gr.Dropdown(choices=model_dict.keys(), value=list(model_dict.keys())[1], label="Model", scale=2)
            temperature = gr.Number(value=0.4, label="Temperature", scale=1)
            topp = gr.Number(value=0.9, label="TopP", scale=1)
            max_tokens = gr.Number(value=4096, label="Max Tokens", scale=1)
            # Selecting a model resets Max Tokens to that model's default.
            model.select(fn=change_max_token_default, inputs=model, outputs=max_tokens)



        gen_btn = gr.Button("Generate")
        # Only one of these three output widgets is visible at a time,
        # depending on the chosen format (see change_output_box below).
        html_box = gr.HTML(visible=True)
        json_box = gr.JSON(visible=False)
        text_box = gr.Textbox(visible=False, show_label=False)

        # Hidden until a generation completes; holds the download button.
        with gr.Column(visible=False) as col1:
            with gr.Row() as row4:
                #format_file = gr.Dropdown(choices=FORMAT_OPTIONS, label="File Format", value="JSON")
                #download_btn = gr.Button("Download")
                download_btn = gr.DownloadButton("Download")
                #download_btn = gr.DownloadButton("Download", value=download_to_file, inputs=[html_box, json_box, text_box, format_file, format_gen])
                #downloaded_md = gr.Markdown("""LLM Parameters""", visible=False)
                #download_btn.click(fn=download_to_file,
                #                   inputs=[html_box, json_box, text_box, format_file, format_gen],
                #                   outputs=downloaded_md)


        #output_box = output_list[0]

        #
        # Inline function to change visibility
        #
        # Shows the one output widget matching the chosen format, clears all
        # three, and re-hides the download column.
        def change_output_box(format):
            value = format

            #dd = gr.Dropdown(value=format)
            col = gr.Column(visible=False)
            if value == "HTML":
                #return [gr.HTML(visible=True, value=""), gr.JSON(visible=False, value="{}"), gr.Textbox(visible=False, value=""), dd, col]
                return [gr.HTML(visible=True, value=""), gr.JSON(visible=False, value="{}"), gr.Textbox(visible=False, value=""), col]
            elif value == "JSON":
                #return [gr.HTML(visible=False, value=""), gr.JSON(visible=True, value="{}"), gr.Textbox(visible=False, value=""), dd, col]
                return [gr.HTML(visible=False, value=""), gr.JSON(visible=True, value="{}"), gr.Textbox(visible=False, value=""), col]
            else:
                #return [gr.HTML(visible=False, value=""), gr.JSON(visible=False, value="{}"), gr.Textbox(visible=True, value=""), dd, col]
                return [gr.HTML(visible=False, value=""), gr.JSON(visible=False, value="{}"), gr.Textbox(visible=True, value=""), col]

        #format_gen.select(fn=change_output_box, inputs=format_gen, outputs=[html_box, json_box, text_box, format_file, col1])
        format_gen.select(fn=change_output_box, inputs=format_gen, outputs=[html_box, json_box, text_box, col1])

        gen_btn.click(fn=generate_tests,
                      inputs=[
                          model, element, subsystem, format_gen, temperature, topp, max_tokens, num_tests,
                          role, type,
                      ],
                      #outputs=output_list,
                      outputs=[html_box, json_box, text_box, col1, download_btn],
                      api_name="TCGen")


    # launch() blocks until the server stops; close the websocket on shutdown.
    #demo.launch(share=True, server_name="0.0.0.0")
    demo.launch(server_name="0.0.0.0")
    ws.close()
|
json_format.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from format import Format
|
3 |
+
|
4 |
+
class JSONFormat(Format):
    """Format holding a JSON rendering of a generated test set."""

    def __init__(self, body):
        self.json = body   # the JSON payload this instance wraps
        self.html = None
        self.text = None

    # BUG FIX: Format declares asHTML() as abstract; without an override,
    # JSONFormat could never be instantiated (TypeError: Can't instantiate
    # abstract class). Minimal implementation returns the cached HTML
    # rendering, which is None until a conversion is supplied.
    def asHTML(self):
        return self.html

    def method1(self):
        # Method implementation here
        pass
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
gradio==4.26.0
|
2 |
+
langchain==0.1.11
|
3 |
+
websocket-client==0.53.0
|