import json
import io
import os
import html  # For unescaping HTML entities
from bs4 import BeautifulSoup
import pandas as pd  # For Excel export
from openai import OpenAI, OpenAIError

# Initialize the OpenAI client against Nvidia's API endpoint (serving Llama models).
# The key is read from the environment rather than hardcoded in source.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)
def clean_test_case_output(text):
    """
    Cleans the output to handle HTML characters and unwanted tags.
    """
    text = html.unescape(text)  # Unescape HTML entities
    soup = BeautifulSoup(text, 'html.parser')  # Parse any remaining HTML tags
    cleaned_text = soup.get_text(separator="\n").strip()  # Strip tags, keeping line breaks
    return cleaned_text
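
# A quick illustration of the cleaning step (hypothetical input):
#   clean_test_case_output("&lt;b&gt;Step 1&lt;/b&gt;<br>Login")
# first unescapes to "<b>Step 1</b><br>Login", then strips the tags,
# yielding "Step 1\nLogin".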
def generate_testcases(user_story):
    """
    Generates advanced QA test cases for a given user story by calling
    Nvidia's Llama model API. The prompt is refined for clarity,
    and the output is post-processed for better quality.

    :param user_story: A string representing the user story for which to generate test cases.
    :return: A list of test cases in the form of dictionaries.
    """
    # Prompt instructions that guide the model's behavior
    prompt_instructions = """
    "If it's a DropBury or ODAC Portal user story, then we perform testing in the ODAC Portal"
    "If it's not a DropBury or ODAC Portal user story, then we perform testing in the Tech360 iOS app"
    "Understand the story thoroughly"
    "Generate as many test cases as possible: a minimum of 6, with no upper limit"
    """

    # Combine the instructions with the user story for the model to process
    prompt = prompt_instructions + f"\nUser Story: {user_story}\n"
    try:
        # Call the Nvidia Llama API with the refined prompt
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",  # Llama 3.1 405B instruct model
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.03,  # Low temperature for precise, near-deterministic output
            top_p=0.7,  # Prioritize high-probability tokens
            max_tokens=4096,  # Allow long responses
            stream=True  # Stream the response so tokens arrive as they are generated
        )

        # Accumulate the response from the streaming chunks
        test_cases_text = ""
        for chunk in completion:
            if chunk.choices[0].delta.content is not None:
                test_cases_text += chunk.choices[0].delta.content

        # Guard against an empty response before cleaning
        if test_cases_text.strip() == "":
            return [{"test_case": "No test cases generated or output was empty."}]

        # Clean the output by unescaping HTML entities and stripping tags
        test_cases_text = clean_test_case_output(test_cases_text)

        try:
            # Try to parse the output as JSON, assuming the model returns structured test cases
            test_cases = json.loads(test_cases_text)
            if isinstance(test_cases, list):
                return test_cases  # Structured test cases
            else:
                return [{"test_case": test_cases_text}]  # Wrap non-list output in a single-item list
        except json.JSONDecodeError:
            # Fallback: return the raw text if JSON parsing fails
            return [{"test_case": test_cases_text}]

    except OpenAIError as e:
        # The OpenAI client raises its own exception hierarchy, not requests' exceptions
        print(f"API request failed: {str(e)}")
        return []
def export_test_cases(test_cases):
    if not test_cases:
        return "No test cases to export."

    # Use pandas to write the test cases to an in-memory Excel workbook
    df = pd.DataFrame(test_cases)
    output = io.BytesIO()
    df.to_excel(output, index=False)
    output.seek(0)
    return output.getvalue()
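
# A minimal usage sketch (hypothetical data): the returned bytes can be written
# straight to disk or handed to a framework's download handler.
#   xlsx_bytes = export_test_cases([{"test_case": "Verify login succeeds"}])
#   with open("export.xlsx", "wb") as f:
#       f.write(xlsx_bytes)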
def save_test_cases_as_file(test_cases):
    if not test_cases:
        return "No test cases to save."

    # Use pandas to save the test cases to an Excel file on disk
    df = pd.DataFrame(test_cases)
    df.to_excel('test_cases.xlsx', index=False)
    return 'Excel file saved as test_cases.xlsx'
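
# A minimal end-to-end sketch (assumes NVIDIA_API_KEY is set in the environment;
# the sample user story below is hypothetical).
if __name__ == "__main__":
    sample_story = ("As a Tech360 iOS user, I want to reset my password "
                    "so that I can regain access to my account.")
    cases = generate_testcases(sample_story)
    print(f"Generated {len(cases)} test case entries")
    print(save_test_cases_as_file(cases))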