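# Overview (summarizes the tabs and the feature list rendered in the UI footer).
"""AI Testing Magic: a Gradio app for AI-assisted software testing.

One tool per tab: test-case generation from text requirements or UI images,
test-case optimization, a Q&A assistant over existing test suites, API test
generation from Swagger/OpenAPI specs, manual-to-automation code conversion,
and AI-powered visual comparison. Every tool can export its results as a
timestamped CSV.
"""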
import gradio as gr
import openai
import os
import json
import requests
import yaml
from PIL import Image
import io
import base64
from typing import Dict, List, Any
import pandas as pd
import re
from datetime import datetime
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
# Initialize OpenAI client
def init_openai():
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("❌ OpenAI API key not found in environment variables. Please set OPENAI_API_KEY.")
    return openai.OpenAI(api_key=api_key)

client = init_openai()
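# Note: `client` is created at import time, so the app fails fast when
# OPENAI_API_KEY is missing. On Hugging Face Spaces the key is typically set
# as a repository secret; locally it can live in a .env file (loaded above).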
# Enhanced utility functions
def encode_image(image):
    """Encode image to base64 for OpenAI Vision API"""
    if isinstance(image, str):
        with open(image, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode()
    else:
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode()
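# Illustrative usage sketch (the filename here is hypothetical): encode_image
# accepts either a file path or a PIL image, which matches how the gr.Image
# inputs below (type="pil") call it.
#
#     encode_image("screenshot.png")               # from a path
#     encode_image(Image.open("screenshot.png"))   # from a PIL image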
def call_openai_chat(messages, model="gpt-4o-mini", max_tokens=3000):
    """Enhanced OpenAI API call with better error handling"""
    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=0.3  # Lower temperature for more consistent results
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"OpenAI API Error: {str(e)}"
def parse_test_cases_to_dataframe(text_response):
    """Improved parsing that handles various response formats"""
    try:
        test_cases = []

        # First, try to split by common delimiters - look for patterns like
        # "Test Case", "TC", numbers followed by periods, etc.
        split_patterns = [
            r'\n(?=Test Case[:\s#]*\d+)',
            r'\n(?=TC[:\s#]*\d+)',
            r'\n(?=\d+\.?\s*Test)',
            r'\n(?=#{1,3}\s*Test)',
            r'\n(?=\*\*Test Case)',
            r'\n(?=## Test Case)',
        ]

        blocks = [text_response]  # Start with the whole text
        for pattern in split_patterns:
            new_blocks = []
            for block in blocks:
                new_blocks.extend(re.split(pattern, block, flags=re.IGNORECASE | re.MULTILINE))
            blocks = [b for b in new_blocks if len(b.strip()) > 20]
            if len(blocks) > 1:  # If we found a good split, use it
                break

        # If we still have only one block, try a more aggressive split
        if len(blocks) <= 1:
            lines = text_response.split('\n')
            current_block = []
            blocks = []
            for line in lines:
                line = line.strip()
                if not line:
                    continue
                # Check if this line starts a new test case
                if (re.match(r'(test case|tc)\s*[:#]?\s*\d+', line.lower()) or
                        re.match(r'\d+\.?\s*(test|scenario)', line.lower()) or
                        re.match(r'#{1,3}\s*test', line.lower()) or
                        '**Test Case' in line):
                    if current_block:
                        blocks.append('\n'.join(current_block))
                    current_block = [line]
                else:
                    current_block.append(line)
            if current_block:
                blocks.append('\n'.join(current_block))
        # Parse each block
        for i, block in enumerate(blocks):
            if len(block.strip()) < 20:
                continue

            # Extract information using multiple strategies
            test_case = {}

            # Strategy 1: Look for labeled fields
            lines = [line.strip() for line in block.split('\n') if line.strip()]

            # Initialize with defaults
            default_title = f"Test Case {len(test_cases) + 1}"
            test_case['Test_Case_ID'] = f"TC_{len(test_cases) + 1:03d}"
            test_case['Title'] = default_title
            test_case['Preconditions'] = "N/A"
            test_case['Test_Steps'] = ""
            test_case['Expected_Results'] = ""
            test_case['Priority'] = "Medium"
            test_case['Test_Data'] = "N/A"

            # Parse line by line for labeled content
            current_field = None
            steps_lines = []
            for line in lines:
                line_lower = line.lower()
                # Check for field labels
                if any(label in line_lower for label in ['test case id', 'tc id', 'id:']):
                    test_case['Test_Case_ID'] = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    current_field = None
                elif any(label in line_lower for label in ['title:', 'test case title', 'name:']):
                    test_case['Title'] = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    current_field = None
                elif any(label in line_lower for label in ['precondition', 'prerequisite']):
                    test_case['Preconditions'] = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    current_field = None
                elif any(label in line_lower for label in ['test step', 'steps:', 'procedure']):
                    current_field = 'steps'
                    step_content = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    if step_content:
                        steps_lines.append(step_content)
                elif any(label in line_lower for label in ['expected result', 'expected:', 'result:']):
                    test_case['Expected_Results'] = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    current_field = None
                elif any(label in line_lower for label in ['priority:', 'severity:']):
                    test_case['Priority'] = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    current_field = None
                elif any(label in line_lower for label in ['test data:', 'data:']):
                    test_case['Test_Data'] = re.sub(r'.*?[:\-]\s*', '', line).strip()
                    current_field = None
                else:
                    # If we're in the middle of collecting steps, add this line
                    if current_field == 'steps':
                        steps_lines.append(line)
                    # Otherwise, if no title has been parsed yet, a plain line
                    # is most likely the title
                    elif test_case['Title'] == default_title:
                        if len(line) > 5 and not line.startswith(('•', '-', '*', '1.', '2.')):
                            test_case['Title'] = line[:100]

            # Combine steps
            if steps_lines:
                test_case['Test_Steps'] = '\n'.join(steps_lines)

            # Fallback: if we couldn't parse properly, use the first few lines
            if (test_case['Title'] == default_title and
                    test_case['Test_Steps'] == "" and
                    test_case['Expected_Results'] == ""):
                if len(lines) > 0:
                    test_case['Title'] = lines[0][:100]
                if len(lines) > 1:
                    test_case['Test_Steps'] = '\n'.join(lines[1:min(len(lines), 4)])
                if len(lines) > 4:
                    test_case['Expected_Results'] = lines[-1][:200]

            # Add metadata
            test_case['Created_Date'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            test_case['Status'] = "New"
            test_cases.append(test_case)
        # Final fallback - create from raw text
        if not test_cases:
            test_cases.append({
                'Test_Case_ID': 'TC_001',
                'Title': 'Generated Test Case',
                'Preconditions': 'N/A',
                'Test_Steps': text_response[:500] + "..." if len(text_response) > 500 else text_response,
                'Expected_Results': 'Review generated content',
                'Priority': 'Medium',
                'Test_Data': 'N/A',
                'Created_Date': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                'Status': 'Needs Review'
            })

        return pd.DataFrame(test_cases)
    except Exception as e:
        # Error fallback
        return pd.DataFrame({
            'Test_Case_ID': ['TC_001'],
            'Title': ['Parsing Error'],
            'Preconditions': ['N/A'],
            'Test_Steps': [f'Error parsing response: {str(e)}\n\nOriginal text: {text_response[:300]}...'],
            'Expected_Results': ['Manual review required'],
            'Priority': ['High'],
            'Test_Data': ['N/A'],
            'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
            'Status': ['Error']
        })
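# For reference, the parser above is tuned for blocks shaped like the template
# requested by generate_test_cases_from_text further down, e.g.:
#
#     Test Case ID: TC_001
#     Title: Login with valid credentials
#     Preconditions: User has a valid account, application is accessible
#     Test Steps:
#     1. Navigate to login page
#     2. Enter valid username and password
#     3. Click login button
#     Expected Results: User successfully logs in and redirects to dashboard
#     Priority: High
#     Test Data: username: testuser@example.com, password: Test123!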
def parse_api_tests_to_dataframe(text_response):
    """Enhanced API test parsing"""
    try:
        api_tests = []

        # Enhanced patterns for API tests
        test_id_pattern = r'(?:Test Case|API Test|Test)\s*(?:ID|#)?\s*:?\s*([^\n]+)'
        method_pattern = r'(?:HTTP Method|Method)\s*:?\s*([^\n]+)'
        endpoint_pattern = r'(?:Endpoint|URL|Path)\s*:?\s*([^\n]+)'
        description_pattern = r'(?:Description|Test Description)\s*:?\s*([^\n]+)'
        headers_pattern = r'(?:Request Headers?|Headers)\s*:?\s*((?:[^\n]*\n?)*?)(?=Request Body|Expected|Test Case|$)'
        body_pattern = r'(?:Request Body|Body|Payload)\s*:?\s*((?:[^\n]*\n?)*?)(?=Expected|Response|Test Case|$)'
        status_pattern = r'(?:Expected Status|Status Code|Response Code)\s*:?\s*([^\n]+)'
        response_pattern = r'(?:Expected Response|Response)\s*:?\s*((?:[^\n]*\n?)*?)(?=Test Case|$)'
        category_pattern = r'(?:Category|Type)\s*:?\s*([^\n]+)'

        blocks = re.split(r'\n\s*(?=(?:Test Case|API Test))', text_response, flags=re.IGNORECASE)
        for block in blocks:
            if len(block.strip()) < 30:
                continue

            api_test = {}
            id_match = re.search(test_id_pattern, block, re.IGNORECASE)
            api_test['Test_Case_ID'] = id_match.group(1).strip() if id_match else f"API_TC_{len(api_tests)+1:03d}"
            method_match = re.search(method_pattern, block, re.IGNORECASE)
            api_test['HTTP_Method'] = method_match.group(1).strip() if method_match else "GET"
            endpoint_match = re.search(endpoint_pattern, block, re.IGNORECASE)
            api_test['Endpoint'] = endpoint_match.group(1).strip() if endpoint_match else "/api/endpoint"
            desc_match = re.search(description_pattern, block, re.IGNORECASE)
            api_test['Description'] = desc_match.group(1).strip() if desc_match else "API Test Description"
            headers_match = re.search(headers_pattern, block, re.IGNORECASE | re.DOTALL)
            api_test['Request_Headers'] = headers_match.group(1).strip() if headers_match else "Content-Type: application/json"
            body_match = re.search(body_pattern, block, re.IGNORECASE | re.DOTALL)
            api_test['Request_Body'] = body_match.group(1).strip() if body_match else "N/A"
            status_match = re.search(status_pattern, block, re.IGNORECASE)
            api_test['Expected_Status_Code'] = status_match.group(1).strip() if status_match else "200"
            response_match = re.search(response_pattern, block, re.IGNORECASE | re.DOTALL)
            api_test['Expected_Response'] = response_match.group(1).strip() if response_match else "Success response"
            category_match = re.search(category_pattern, block, re.IGNORECASE)
            api_test['Test_Category'] = category_match.group(1).strip() if category_match else "Functional"
            api_test['Created_Date'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            api_test['Status'] = "New"
            api_tests.append(api_test)

        return pd.DataFrame(api_tests) if api_tests else pd.DataFrame({
            'Test_Case_ID': ['API_TC_001'],
            'HTTP_Method': ['GET'],
            'Endpoint': ['/api/test'],
            'Description': ['Sample API Test'],
            'Request_Headers': ['Content-Type: application/json'],
            'Request_Body': ['N/A'],
            'Expected_Status_Code': ['200'],
            'Expected_Response': ['Success'],
            'Test_Category': ['Functional'],
            'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
            'Status': ['New']
        })
    except Exception as e:
        return pd.DataFrame({
            'Test_Case_ID': ['API_TC_001'],
            'HTTP_Method': ['GET'],
            'Endpoint': ['/api/error'],
            'Description': [f'Parsing Error: {str(e)}'],
            'Request_Headers': ['Content-Type: application/json'],
            'Request_Body': ['N/A'],
            'Expected_Status_Code': ['500'],
            'Expected_Response': ['Error'],
            'Test_Category': ['Error'],
            'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
            'Status': ['Error']
        })
def create_download_csv(df, filename_prefix):
    """Create CSV for download"""
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{filename_prefix}_{timestamp}.csv"
    csv_path = f"/tmp/{filename}"
    df.to_csv(csv_path, index=False)
    return csv_path
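# Portability note: /tmp is writable on Hugging Face Spaces and most Linux
# hosts; a platform-neutral variant (sketch) would use the standard library:
#
#     import tempfile
#     csv_path = os.path.join(tempfile.gettempdir(), filename)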
# Enhanced test case generation with better prompts
def generate_test_cases_from_text(requirements, test_types, priority_level):
    """Enhanced test case generation with more specific prompts"""
    enhanced_prompt = f"""
As an expert QA engineer, create comprehensive and detailed test cases for the following requirements.

REQUIREMENTS:
{requirements}

INSTRUCTIONS:
- Generate {test_types} test scenarios
- Focus on {priority_level} priority tests
- Use EXACTLY this format for each test case
- Generate 5-8 test cases with clear separation

FORMAT EACH TEST CASE EXACTLY AS SHOWN:

Test Case ID: TC_001
Title: Login with valid credentials
Preconditions: User has a valid account, application is accessible
Test Steps:
1. Navigate to login page
2. Enter valid username
3. Enter valid password
4. Click login button
Expected Results: User successfully logs in and redirects to dashboard
Priority: High
Test Data: username: testuser@example.com, password: Test123!

Test Case ID: TC_002
Title: Login with invalid credentials
Preconditions: Application is accessible
Test Steps:
1. Navigate to login page
2. Enter invalid username
3. Enter invalid password
4. Click login button
Expected Results: Error message displayed, user remains on login page
Priority: High
Test Data: username: invalid@test.com, password: wrongpass

Now generate test cases for the given requirements following this EXACT format.
"""
    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=4000)
    if "Error:" in response:
        return response, None, None

    df = parse_test_cases_to_dataframe(response)
    csv_path = create_download_csv(df, "generated_test_cases")
    return response, df, csv_path
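# Console usage (sketch, with illustrative argument values): this is the same
# call the "Generate Test Cases" button makes, and the dropdown choices in the
# UI below supply the last two arguments.
#
#     text, df, csv = generate_test_cases_from_text(
#         "Users can reset their password via an emailed link",
#         "Functional Tests",
#         "High Priority",
#     )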
def generate_test_cases_from_image(image, test_focus):
    """Enhanced image-based test case generation"""
    if image is None:
        return "Please upload an image first.", None, None

    base64_image = encode_image(image)
    enhanced_prompt = f"""
As an expert QA engineer, analyze this UI/wireframe/mockup image and create test cases.

FOCUS AREA: {test_focus}

Use EXACTLY this format for each test case:

Test Case ID: TC_001
Title: [Clear descriptive title]
Preconditions: [Setup requirements]
Test Steps:
1. [Step 1]
2. [Step 2]
3. [Step 3]
Expected Results: [Expected outcome]
Priority: [High/Medium/Low]
Test Data: [Required data]

Generate 5-8 test cases covering all visible UI elements and user flows.
"""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": enhanced_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
            ]
        }
    ]
    response = call_openai_chat(messages, model="gpt-4o-mini", max_tokens=4000)
    if "Error:" in response:
        return response, None, None

    df = parse_test_cases_to_dataframe(response)
    csv_path = create_download_csv(df, "image_based_test_cases")
    return response, df, csv_path
def optimize_test_cases(existing_cases, focus_areas, optimization_goal):
    """Enhanced test case optimization"""
    focus_text = ", ".join(focus_areas) if focus_areas else "overall quality"
    enhanced_prompt = f"""
As a senior QA engineer, optimize the following test cases with focus on {focus_text}.

OPTIMIZATION GOAL: {optimization_goal}

EXISTING TEST CASES:
{existing_cases}

OPTIMIZATION REQUIREMENTS:
1. Improve clarity and specificity of test steps
2. Enhance test data specifications
3. Optimize test coverage and reduce redundancy
4. Ensure traceability to requirements
5. Improve maintainability and reusability
6. Add risk-based prioritization
7. Include automation feasibility assessment

PROVIDE:
1. Optimized test cases in standard format
2. Summary of improvements made
3. Recommendations for test strategy
4. Risk assessment and mitigation suggestions

FORMAT OPTIMIZED TEST CASES AS:
Test Case ID: TC_XXX
Test Case Title: [Improved title]
Preconditions: [Enhanced preconditions]
Test Steps: [Optimized steps with better clarity]
Expected Results: [More specific expected results]
Priority: [Risk-based priority]
Test Data: [Detailed test data specifications]
Automation Feasibility: [High/Medium/Low]
"""
    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=4000)
    if "Error:" in response:
        return response, None

    df = parse_test_cases_to_dataframe(response)
    csv_path = create_download_csv(df, "optimized_test_cases")
    return response, csv_path
def answer_qa_question(test_cases_content, question, analysis_type):
    """Enhanced Q&A with different analysis types"""
    enhanced_prompt = f"""
As a QA expert, analyze the provided test cases and answer the following question with {analysis_type} analysis.

TEST CASES:
{test_cases_content}

QUESTION: {question}

ANALYSIS TYPE: {analysis_type}

INSTRUCTIONS:
- Provide detailed, actionable insights
- Reference specific test cases where relevant
- Include quantitative analysis where possible
- Suggest improvements or recommendations
- Consider industry best practices

If the question relates to:
- Coverage: Analyze what's covered and gaps
- Quality: Assess test case quality and completeness
- Strategy: Provide strategic recommendations
- Automation: Evaluate automation potential
- Risk: Identify and assess testing risks
"""
    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=3000)
    return response
def fetch_swagger_spec(url):
    """Enhanced Swagger spec fetching with better error handling"""
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Accept': 'application/json, application/yaml, text/yaml, */*'
        }
        response = requests.get(url, timeout=30, headers=headers)
        response.raise_for_status()

        content_type = response.headers.get('content-type', '').lower()
        if 'yaml' in content_type or url.endswith(('.yaml', '.yml')):
            return yaml.safe_load(response.text)
        else:
            return response.json()
    except requests.exceptions.Timeout:
        return {"error": "Request timeout - URL took too long to respond"}
    except requests.exceptions.ConnectionError:
        return {"error": "Connection error - Unable to reach the URL"}
    except requests.exceptions.HTTPError as e:
        return {"error": f"HTTP error {e.response.status_code}: {e.response.reason}"}
    except yaml.YAMLError as e:
        return {"error": f"YAML parsing error: {str(e)}"}
    except json.JSONDecodeError as e:
        return {"error": f"JSON parsing error: {str(e)}"}
    except Exception as e:
        return {"error": f"Unexpected error: {str(e)}"}
def generate_api_test_cases(swagger_url, endpoints_filter, test_types, include_security):
    """Enhanced API test case generation"""
    spec = fetch_swagger_spec(swagger_url)
    if "error" in spec:
        return f"Error fetching Swagger spec: {spec['error']}", None

    # Limit spec size for the prompt (serialize once, then truncate)
    spec_json = json.dumps(spec, indent=2)
    spec_summary = spec_json[:8000] + "..." if len(spec_json) > 8000 else spec_json

    security_instruction = "\n- Include security testing scenarios (authentication, authorization, input validation)" if include_security else ""
    enhanced_prompt = f"""
As an API testing expert, generate comprehensive test cases for the following OpenAPI/Swagger specification:

SWAGGER SPECIFICATION:
{spec_summary}

FILTER: {endpoints_filter if endpoints_filter else "All endpoints"}
TEST TYPES: {", ".join(test_types)}

INSTRUCTIONS:
- Create detailed test cases for each endpoint
- Include positive, negative, and boundary test scenarios
- Cover different HTTP methods and status codes
- Include request/response validation
- Test error handling and edge cases{security_instruction}
- Consider API rate limiting and performance

FORMAT EACH API TEST CASE AS:
Test Case ID: API_TC_XXX
HTTP Method: [GET/POST/PUT/DELETE]
Endpoint: [Full endpoint path]
Test Description: [What this test validates]
Request Headers: [Required headers with examples]
Request Body: [JSON payload if applicable]
Expected Status Code: [HTTP status code]
Expected Response: [Expected response structure/content]
Test Category: [Functional/Security/Performance/Negative]
Test Data: [Specific test data requirements]

Generate comprehensive test coverage for the API.
"""
    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=5000)
    if "Error:" in response:
        return response, None

    df = parse_api_tests_to_dataframe(response)
    csv_path = create_download_csv(df, "api_test_cases")
    return response, csv_path
def generate_automation_code(manual_tests, framework, language, include_reporting):
    """Enhanced automation code generation"""
    reporting_instruction = "\n- Include test reporting and logging mechanisms" if include_reporting else ""
    enhanced_prompt = f"""
As a test automation expert, convert the following manual test cases into production-ready automation code.

MANUAL TEST CASES:
{manual_tests}

FRAMEWORK: {framework}
LANGUAGE: {language}

REQUIREMENTS:
- Generate complete, executable automation code
- Follow best practices and design patterns
- Include proper error handling and assertions
- Implement page object model (if applicable)
- Add configuration management
- Include setup and teardown methods
- Use appropriate wait strategies
- Implement data-driven testing approaches{reporting_instruction}
- Add meaningful comments and documentation
- Include dependency management (requirements/package files)

DELIVERABLES:
1. Main test file with complete implementation
2. Configuration file (if applicable)
3. Requirements/dependencies file
4. README with setup instructions
5. Best practices documentation

Generate production-ready, maintainable automation code.
"""
    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=5000)

    # Create metadata DataFrame
    automation_df = pd.DataFrame({
        'Framework': [framework],
        'Language': [language],
        'Code_Lines': [len(response.split('\n'))],
        'Generated_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
        'Include_Reporting': [include_reporting],
        'Estimated_Setup_Time': ['30-60 minutes'],
        'Complexity': ['Medium' if len(response.split('\n')) > 100 else 'Low']
    })
    csv_path = create_download_csv(automation_df, "automation_metadata")
    return response, csv_path
def compare_images(expected_image, actual_image, comparison_type, sensitivity):
    """Enhanced visual comparison with sensitivity settings"""
    # Guard against missing uploads before encoding
    if expected_image is None or actual_image is None:
        return "Please upload both an expected and an actual image.", None

    expected_b64 = encode_image(expected_image)
    actual_b64 = encode_image(actual_image)
    sensitivity_instruction = {
        "High": "Detect even minor differences in pixels, colors, and spacing",
        "Medium": "Focus on noticeable differences that affect user experience",
        "Low": "Only report significant differences that impact functionality"
    }
    enhanced_prompt = f"""
As a visual testing expert, perform a detailed {comparison_type} between these two images.

COMPARISON TYPE: {comparison_type}
SENSITIVITY: {sensitivity} - {sensitivity_instruction[sensitivity]}

The first image is the expected result, the second is the actual result.

ANALYSIS REQUIREMENTS:
1. Overall Pass/Fail determination
2. Specific differences with locations and descriptions
3. Similarity percentage calculation
4. Impact assessment (High/Medium/Low) for each difference
5. Root cause analysis for major differences
6. Recommendations for fixing issues
7. Areas that match perfectly
8. Suggestions for improving visual test stability

FOCUS AREAS:
- Layout and positioning accuracy
- Color consistency and contrast
- Text rendering and typography
- Image quality and resolution
- Responsive design elements
- Cross-browser compatibility indicators

Provide actionable insights for the development team.
"""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": enhanced_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{expected_b64}"}},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{actual_b64}"}}
            ]
        }
    ]
    response = call_openai_chat(messages, model="gpt-4o-mini", max_tokens=3000)

    # Create comparison summary DataFrame
    comparison_df = pd.DataFrame({
        'Comparison_Type': [comparison_type],
        'Sensitivity': [sensitivity],
        'Timestamp': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
        'Expected_Image_Size': [f"{expected_image.size[0]}x{expected_image.size[1]}"],
        'Actual_Image_Size': [f"{actual_image.size[0]}x{actual_image.size[1]}"],
        'Analysis_Length': [len(response)],
        'Status': ['Completed']
    })
    csv_path = create_download_csv(comparison_df, "visual_comparison_summary")
    return response, csv_path
# Create Gradio interface
def create_gradio_interface():
    with gr.Blocks(title="🧪 AI Testing Magic", theme=gr.themes.Soft()) as app:
        gr.Markdown("""
        # 🧪 AI Testing Magic
        ### Bringing magic to every phase of software testing! ✨

        Choose a testing tool from the tabs below to get started.
        """)
        with gr.Tabs():
            # Test Case Creation Tab
            with gr.TabItem("📝 Create Test Cases"):
                gr.Markdown("### Create comprehensive test cases from requirements")
                with gr.Tabs():
                    with gr.TabItem("Text Requirements"):
                        with gr.Row():
                            with gr.Column():
                                requirements_input = gr.Textbox(
                                    label="Requirements",
                                    placeholder="Enter your requirements here...",
                                    lines=8
                                )
                                test_types = gr.Dropdown(
                                    choices=["Functional Tests", "Integration Tests", "Regression Tests", "User Acceptance Tests", "All Types"],
                                    value="Functional Tests",
                                    label="Test Types"
                                )
                                priority_level = gr.Dropdown(
                                    choices=["High Priority", "Medium Priority", "Low Priority", "All Priorities"],
                                    value="All Priorities",
                                    label="Priority Focus"
                                )
                                generate_btn = gr.Button("✨ Generate Test Cases", variant="primary")
                            with gr.Column():
                                test_cases_output = gr.Textbox(
                                    label="Generated Test Cases",
                                    lines=10,
                                    max_lines=15
                                )
                        # Add a separate row for the dataframe and download
                        with gr.Row():
                            test_cases_table = gr.Dataframe(
                                label="📊 Test Cases Table",
                                interactive=False,
                                wrap=True
                            )
                        with gr.Row():
                            csv_download = gr.File(label="📥 Download CSV")
                        generate_btn.click(
                            fn=generate_test_cases_from_text,
                            inputs=[requirements_input, test_types, priority_level],
                            outputs=[test_cases_output, test_cases_table, csv_download]
                        )
with gr.TabItem("Image Requirements"): | |
with gr.Row(): | |
with gr.Column(): | |
image_input = gr.Image( | |
label="Upload Requirements Image", | |
type="pil" | |
) | |
test_focus = gr.Dropdown( | |
choices=["UI/UX Testing", "Functional Testing", "Usability Testing", "Accessibility Testing", "All Areas"], | |
value="All Areas", | |
label="Test Focus" | |
) | |
generate_img_btn = gr.Button("β¨ Generate Test Cases from Image", variant="primary") | |
with gr.Column(): | |
img_test_cases_output = gr.Textbox( | |
label="Generated Test Cases", | |
lines=10, | |
max_lines=15 | |
) | |
# Add separate row for table and download | |
with gr.Row(): | |
img_test_cases_table = gr.Dataframe( | |
label="π Test Cases Table", | |
interactive=False, | |
wrap=True | |
) | |
with gr.Row(): | |
img_csv_download = gr.File(label="π₯ Download CSV") | |
generate_img_btn.click( | |
fn=generate_test_cases_from_image, | |
inputs=[image_input, test_focus], | |
outputs=[img_test_cases_output, img_test_cases_table, img_csv_download] | |
) | |
            # Test Case Optimization Tab
            with gr.TabItem("⚡ Optimize Test Cases"):
                gr.Markdown("### Review and refine test cases for maximum effectiveness")
                with gr.Row():
                    with gr.Column():
                        existing_cases_input = gr.Textbox(
                            label="Existing Test Cases",
                            placeholder="Paste your existing test cases here...",
                            lines=10
                        )
                        focus_areas = gr.CheckboxGroup(
                            choices=["Clarity", "Completeness", "Coverage", "Efficiency", "Maintainability", "Edge Cases", "Risk Assessment"],
                            value=["Clarity", "Coverage"],
                            label="Optimization Focus Areas"
                        )
                        optimization_goal = gr.Dropdown(
                            choices=["Improve Test Quality", "Reduce Test Execution Time", "Enhance Coverage", "Better Maintainability", "Risk-Based Optimization"],
                            value="Improve Test Quality",
                            label="Optimization Goal"
                        )
                        optimize_btn = gr.Button("🔄 Optimize Test Cases", variant="primary")
                    with gr.Column():
                        optimized_output = gr.Textbox(
                            label="Optimized Test Cases",
                            lines=15,
                            max_lines=20
                        )
                        opt_csv_download = gr.File(label="Download Optimized CSV")
                optimize_btn.click(
                    fn=optimize_test_cases,
                    inputs=[existing_cases_input, focus_areas, optimization_goal],
                    outputs=[optimized_output, opt_csv_download]
                )
# Q&A Assistant Tab | |
with gr.TabItem("β Q&A Assistant"): | |
gr.Markdown("### Ask questions about your test cases and get expert insights") | |
with gr.Row(): | |
with gr.Column(): | |
test_cases_file = gr.File( | |
label="Upload Test Cases File (TXT, CSV, JSON)", | |
file_types=[".txt", ".csv", ".json"] | |
) | |
test_cases_text = gr.Textbox( | |
label="Or Paste Test Cases Here", | |
placeholder="Paste your test cases...", | |
lines=8 | |
) | |
question_input = gr.Textbox( | |
label="Your Question", | |
placeholder="e.g., What test cases cover the login functionality?", | |
lines=2 | |
) | |
analysis_type = gr.Dropdown( | |
choices=["Coverage Analysis", "Quality Assessment", "Strategy Recommendations", "Automation Feasibility", "Risk Analysis"], | |
value="Coverage Analysis", | |
label="Analysis Type" | |
) | |
qa_btn = gr.Button("π Get Answer", variant="primary") | |
with gr.Column(): | |
qa_output = gr.Textbox( | |
label="Expert Answer", | |
lines=15, | |
max_lines=20 | |
) | |
def process_qa(file, text, question, analysis): | |
content = text | |
if file: | |
try: | |
if file.name.endswith('.csv'): | |
df = pd.read_csv(file.name) | |
content = df.to_string() | |
else: | |
with open(file.name, 'r') as f: | |
content = f.read() | |
except Exception as e: | |
content = f"Error reading file: {str(e)}" | |
return answer_qa_question(content, question, analysis) | |
qa_btn.click( | |
fn=process_qa, | |
inputs=[test_cases_file, test_cases_text, question_input, analysis_type], | |
outputs=[qa_output] | |
) | |
            # API Test Cases Tab
            with gr.TabItem("🌐 API Test Cases"):
                gr.Markdown("### Generate comprehensive API test cases from Swagger/OpenAPI specifications")
                with gr.Row():
                    with gr.Column():
                        swagger_url = gr.Textbox(
                            label="Swagger/OpenAPI URL",
                            placeholder="https://petstore.swagger.io/v2/swagger.json",
                            lines=1
                        )
                        endpoints_filter = gr.Textbox(
                            label="Filter Endpoints (Optional)",
                            placeholder="e.g., /users, /pets, /orders",
                            lines=1
                        )
                        api_test_types = gr.CheckboxGroup(
                            choices=["Positive Tests", "Negative Tests", "Boundary Tests", "Security Tests", "Performance Tests"],
                            value=["Positive Tests", "Negative Tests"],
                            label="Test Types"
                        )
                        include_security = gr.Checkbox(
                            label="Include Security Testing",
                            value=True
                        )
                        api_generate_btn = gr.Button("🚀 Generate API Test Cases", variant="primary")
                    with gr.Column():
                        api_output = gr.Textbox(
                            label="Generated API Test Cases",
                            lines=15,
                            max_lines=20
                        )
                        api_csv_download = gr.File(label="Download API Tests CSV")
                api_generate_btn.click(
                    fn=generate_api_test_cases,
                    inputs=[swagger_url, endpoints_filter, api_test_types, include_security],
                    outputs=[api_output, api_csv_download]
                )
            # Automation Code Tab
            with gr.TabItem("🤖 Automate Manual Tests"):
                gr.Markdown("### Convert manual test cases into production-ready automation code")
                with gr.Row():
                    with gr.Column():
                        manual_tests = gr.Textbox(
                            label="Manual Test Cases",
                            placeholder="Paste your manual test cases here...",
                            lines=10
                        )
                        automation_framework = gr.Dropdown(
                            choices=[
                                "Selenium WebDriver (Python)",
                                "Playwright (Python)",
                                "Cypress (JavaScript)",
                                "Selenium WebDriver (Java)",
                                "RestAssured (Java)",
                                "TestNG (Java)",
                                "PyTest (Python)",
                                "Robot Framework"
                            ],
                            value="Selenium WebDriver (Python)",
                            label="Automation Framework"
                        )
                        programming_language = gr.Dropdown(
                            choices=["Python", "JavaScript", "Java", "C#", "TypeScript"],
                            value="Python",
                            label="Programming Language"
                        )
                        include_reporting = gr.Checkbox(
                            label="Include Test Reporting",
                            value=True
                        )
                        automation_btn = gr.Button("🔧 Generate Automation Code", variant="primary")
                    with gr.Column():
                        automation_output = gr.Code(
                            label="Generated Automation Code",
                            language="python",
                            lines=15
                        )
                        automation_csv_download = gr.File(label="Download Metadata CSV")

                def update_code_language(lang):
                    lang_map = {
                        "Python": "python",
                        "JavaScript": "javascript",
                        "Java": "java",
                        "C#": "csharp",
                        "TypeScript": "typescript"
                    }
                    return gr.Code(language=lang_map.get(lang, "python"))

                programming_language.change(
                    fn=update_code_language,
                    inputs=[programming_language],
                    outputs=[automation_output]
                )
                automation_btn.click(
                    fn=generate_automation_code,
                    inputs=[manual_tests, automation_framework, programming_language, include_reporting],
                    outputs=[automation_output, automation_csv_download]
                )
            # Visual Validation Tab
            with gr.TabItem("👁️ Visual Validation"):
                gr.Markdown("### Compare expected and actual images with AI-powered analysis")
                with gr.Row():
                    with gr.Column():
                        expected_image = gr.Image(
                            label="Expected Image",
                            type="pil"
                        )
                        actual_image = gr.Image(
                            label="Actual Image",
                            type="pil"
                        )
                        comparison_type = gr.Dropdown(
                            choices=["Layout Comparison", "Color Comparison", "Text Comparison", "Complete UI Comparison", "Responsive Design Check"],
                            value="Complete UI Comparison",
                            label="Comparison Type"
                        )
                        sensitivity = gr.Dropdown(
                            choices=["High", "Medium", "Low"],
                            value="Medium",
                            label="Detection Sensitivity"
                        )
                        visual_btn = gr.Button("🔍 Compare Images", variant="primary")
                    with gr.Column():
                        visual_output = gr.Textbox(
                            label="Comparison Results",
                            lines=15,
                            max_lines=20
                        )
                        visual_csv_download = gr.File(label="Download Comparison Summary CSV")
                visual_btn.click(
                    fn=compare_images,
                    inputs=[expected_image, actual_image, comparison_type, sensitivity],
                    outputs=[visual_output, visual_csv_download]
                )
        # Footer with enhanced information
        gr.Markdown("""
        ---
        ## 🚀 Enhanced Features

        ### ✅ **Streamlined Test Case Creation**
        - Create test cases from text requirements with priority and type selection
        - Generate test cases from UI mockups and wireframes using AI vision
        - Enhanced parsing with better accuracy and structured output

        ### ⚡ **Advanced Test Case Optimization**
        - Multi-dimensional optimization focusing on specific quality areas
        - Risk-based prioritization and automation feasibility assessment
        - Detailed improvement recommendations and best practices

        ### ❓ **Intelligent Q&A Assistant**
        - Multiple analysis types: coverage, quality, strategy, automation, risk
        - Support for various file formats and intelligent content parsing
        - Expert-level insights with actionable recommendations

        ### 🌐 **Comprehensive API Test Generation**
        - Enhanced Swagger/OpenAPI parsing with better error handling
        - Security testing scenarios and performance considerations
        - Multiple test types with detailed request/response validation

        ### 🤖 **Production-Ready Automation Code**
        - Support for modern frameworks and best practices
        - Complete project structure with configuration and dependencies
        - Test reporting integration and maintainable code patterns

        ### 👁️ **Advanced Visual Validation**
        - Multiple comparison types with configurable sensitivity
        - Detailed difference analysis with impact assessment
        - Cross-browser and responsive design considerations

        ### 📊 **Enhanced Data Export**
        - Structured CSV exports with timestamps for all features
        - Comprehensive metadata tracking and version control
        - Professional reporting formats for stakeholder communication

        ---
        *Made with ❤️ using Gradio and OpenAI GPT-4 | Enhanced with better prompts and accuracy*
        """)
    return app
# Launch the application
if __name__ == "__main__":
    app = create_gradio_interface()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,  # ignored on Hugging Face Spaces; only matters for local runs
        debug=True
    )