"""
Simplified Load Testing - Synchronous Version

Sequentially exercises a running service's health, dashboard, and cache
endpoints, then reports latency statistics, monitoring-counter accuracy,
and cache effectiveness.
"""

import requests
import time
import statistics
from datetime import datetime
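
# Shape of the /health/dashboard JSON this script assumes (inferred from the
# fields read below, not verified against the service):
#   {"system": {"total_requests": int},
#    "cache": {"hits": int, "misses": int, "hit_rate": float}}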


class SimpleLoadTester:
    """Sequential load tester that records the outcome of every request."""

    def __init__(self, base_url="http://localhost:7860"):
        self.base_url = base_url
        self.results = []

    def make_request(self, endpoint):
        """GET {base_url}{endpoint} once and return a result dict."""
        # perf_counter is monotonic, so latencies are immune to system
        # clock adjustments (time.time() is not).
        start = time.perf_counter()
        try:
            response = requests.get(f"{self.base_url}{endpoint}", timeout=5)
            latency_ms = (time.perf_counter() - start) * 1000
            return {
                'success': response.status_code == 200,
                'latency_ms': latency_ms,
                'status_code': response.status_code,
                'endpoint': endpoint
            }
        except Exception as e:
            latency_ms = (time.perf_counter() - start) * 1000
            return {
                'success': False,
                'latency_ms': latency_ms,
                'status_code': 0,
                'endpoint': endpoint,
                'error': str(e)
            }

    def run_test(self, endpoint, num_requests=50):
        """Send num_requests sequential requests to endpoint and print stats."""
        print(f"\n{'='*60}")
        print(f"Testing: {endpoint}")
        print(f"Requests: {num_requests}")
        print(f"{'='*60}")

        results = []
        for i in range(num_requests):
            result = self.make_request(endpoint)
            results.append(result)
            self.results.append(result)  # also kept for the final report

            if (i + 1) % 10 == 0:
                print(f"Progress: {i+1}/{num_requests}")

        successes = [r for r in results if r['success']]
        failures = [r for r in results if not r['success']]
        latencies = [r['latency_ms'] for r in successes]

        print(f"\nResults for {endpoint}:")
        print(f"  Total Requests: {len(results)}")
        print(f"  Successful: {len(successes)} ({len(successes)/len(results)*100:.1f}%)")
        print(f"  Failed: {len(failures)} ({len(failures)/len(results)*100:.1f}%)")

        if latencies:
            print("\nLatency Statistics:")
            print(f"  Mean: {statistics.mean(latencies):.2f} ms")
            print(f"  Median: {statistics.median(latencies):.2f} ms")
            print(f"  Min: {min(latencies):.2f} ms")
            print(f"  Max: {max(latencies):.2f} ms")
            if len(latencies) > 1:
                print(f"  Std Dev: {statistics.stdev(latencies):.2f} ms")

    def verify_monitoring_accuracy(self):
        """Confirm the dashboard's request counter tracks requests we send."""
        print(f"\n{'='*60}")
        print("VERIFYING MONITORING ACCURACY")
        print(f"{'='*60}")

        # Snapshot the request counter before the test batch.
        response = requests.get(f"{self.base_url}/health/dashboard", timeout=5)
        initial_data = response.json()
        initial_count = initial_data['system']['total_requests']
        print(f"Initial request count: {initial_count}")

        print("\nMaking 20 test requests...")
        for _ in range(20):
            self.make_request("/health")

        # Give the monitoring backend a moment to record the batch.
        time.sleep(1)

        response = requests.get(f"{self.base_url}/health/dashboard", timeout=5)
        final_data = response.json()
        final_count = final_data['system']['total_requests']
        print(f"Final request count: {final_count}")

        actual_increase = final_count - initial_count
        expected_increase = 20

        print("\nMonitoring Accuracy:")
        print(f"  Expected increase: {expected_increase}")
        print(f"  Actual increase: {actual_increase}")
        print(f"  Accuracy: {(actual_increase/expected_increase*100):.1f}%")

        # Other traffic (including these dashboard reads) can only raise the
        # count, so a >= check with 5% tolerance is sufficient.
        if actual_increase >= expected_increase * 0.95:
            print("  PASS: Monitoring is accurately tracking requests")
        else:
            print("  WARNING: Monitoring may have tracking issues")

    def test_cache_effectiveness(self):
        """Hit a cacheable endpoint repeatedly and report hit/miss movement."""
        print(f"\n{'='*60}")
        print("TESTING CACHE EFFECTIVENESS")
        print(f"{'='*60}")

        # Record the cache counters before the test batch.
        response = requests.get(f"{self.base_url}/health/dashboard", timeout=5)
        initial_data = response.json()
        initial_hits = initial_data['cache']['hits']
        initial_misses = initial_data['cache']['misses']
        initial_hit_rate = initial_data['cache']['hit_rate']

        print("Initial cache state:")
        print(f"  Hits: {initial_hits}")
        print(f"  Misses: {initial_misses}")
        print(f"  Hit Rate: {(initial_hit_rate * 100):.1f}%")

        print("\nMaking 30 requests to test caching...")
        for _ in range(30):
            self.make_request("/health/dashboard")

        # Give the monitoring backend a moment to record the batch.
        time.sleep(1)

        response = requests.get(f"{self.base_url}/health/dashboard", timeout=5)
        final_data = response.json()
        final_hits = final_data['cache']['hits']
        final_misses = final_data['cache']['misses']
        final_hit_rate = final_data['cache']['hit_rate']

        print("\nFinal cache state:")
        print(f"  Hits: {final_hits}")
        print(f"  Misses: {final_misses}")
        print(f"  Hit Rate: {(final_hit_rate * 100):.1f}%")

        print("\nCache Performance:")
        print(f"  Hit increase: {final_hits - initial_hits}")
        print(f"  Miss increase: {final_misses - initial_misses}")
        print(f"  Current hit rate: {(final_hit_rate * 100):.1f}%")

    def generate_report(self):
        """Summarize all recorded results, overall and per endpoint."""
        print(f"\n{'='*60}")
        print("COMPREHENSIVE LOAD TEST REPORT")
        print(f"{'='*60}")
        print(f"Generated: {datetime.now().isoformat()}")

        if not self.results:
            print("No test results available")
            return

        total = len(self.results)
        successes = [r for r in self.results if r['success']]
        failures = [r for r in self.results if not r['success']]

        print("\nOverall Statistics:")
        print(f"  Total Requests: {total}")
        print(f"  Successful: {len(successes)} ({len(successes)/total*100:.1f}%)")
        print(f"  Failed: {len(failures)} ({len(failures)/total*100:.1f}%)")

        all_latencies = [r['latency_ms'] for r in successes]
        if all_latencies:
            print("\nGlobal Latency Statistics:")
            print(f"  Mean: {statistics.mean(all_latencies):.2f} ms")
            print(f"  Median: {statistics.median(all_latencies):.2f} ms")
            print(f"  Min: {min(all_latencies):.2f} ms")
            print(f"  Max: {max(all_latencies):.2f} ms")

        endpoints = set(r['endpoint'] for r in self.results)
        print("\nBreakdown by Endpoint:")
        for endpoint in sorted(endpoints):
            endpoint_results = [r for r in self.results if r['endpoint'] == endpoint]
            endpoint_successes = [r for r in endpoint_results if r['success']]
            print(f"  {endpoint}:")
            print(f"    Requests: {len(endpoint_results)}")
            print(f"    Success Rate: {len(endpoint_successes)/len(endpoint_results)*100:.1f}%")
            if endpoint_successes:
                latencies = [r['latency_ms'] for r in endpoint_successes]
                print(f"    Avg Latency: {statistics.mean(latencies):.2f} ms")

        print("\nLoad testing complete!")
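

# A minimal sketch of tail-latency reporting, assuming Python 3.8+ for
# statistics.quantiles(): p95/p99 expose slow outliers that the mean and
# median above hide. This helper is hypothetical and not wired into the
# original test flow.
def print_percentiles(latencies):
    """Print p50/p95/p99 for a list of latency samples in milliseconds."""
    if len(latencies) < 2:
        return  # quantiles() needs at least two samples
    # n=100 yields 99 cut points; index k-1 holds the k-th percentile.
    cuts = statistics.quantiles(latencies, n=100)
    for label, idx in (("p50", 49), ("p95", 94), ("p99", 98)):
        print(f"  {label}: {cuts[idx]:.2f} ms")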


def main():
    print("="*60)
    print("MEDICAL AI PLATFORM - MONITORING LOAD TEST")
    print("="*60)
    print("Target: http://localhost:7860")
    print(f"Started: {datetime.now().isoformat()}")

    tester = SimpleLoadTester()

    try:
        # Load-test each monitored endpoint.
        tester.run_test("/health", num_requests=50)
        tester.run_test("/health/dashboard", num_requests=30)
        tester.run_test("/admin/cache/statistics", num_requests=20)

        # Then check the monitoring and cache counters themselves.
        tester.verify_monitoring_accuracy()
        tester.test_cache_effectiveness()

        tester.generate_report()

        print(f"\n{'='*60}")
        print("ALL TESTS COMPLETED SUCCESSFULLY")
        print(f"{'='*60}")

    except Exception as e:
        print(f"\nTest failed with error: {str(e)}")
        raise


if __name__ == "__main__":
    main()