""" |
|
|
Minimal Test Server - No Heavy Dependencies |
|
|
Just monitoring infrastructure for load testing |
|
|
""" |
|
|
|
|
|
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from datetime import datetime
import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="Medical AI Platform - Test Server", version="2.0.0")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class SimpleMonitoring:
    """In-memory counters for requests, errors, latencies, and simulated cache activity."""

    def __init__(self):
        self.start_time = datetime.utcnow()
        self.request_count = 0
        self.error_count = 0
        self.latencies = []
        self.cache_hits = 0
        self.cache_misses = 0
        self.cache_entries = 0

    def track_request(self, latency_ms: float, success: bool):
        self.request_count += 1
        self.latencies.append(latency_ms)
        if not success:
            self.error_count += 1

    def get_stats(self):
        uptime = (datetime.utcnow() - self.start_time).total_seconds()
        error_rate = self.error_count / max(self.request_count, 1)
        avg_latency = sum(self.latencies) / max(len(self.latencies), 1)

        return {
            "status": "operational" if error_rate < 0.05 else "degraded",
            "uptime_seconds": uptime,
            "uptime_human": f"{int(uptime // 3600)}h {int((uptime % 3600) // 60)}m",
            "total_requests": self.request_count,
            "error_count": self.error_count,
            "error_rate": error_rate,
            "error_threshold": 0.05,
            "avg_latency_ms": avg_latency,
            "cache": {
                "hits": self.cache_hits,
                "misses": self.cache_misses,
                "hit_rate": self.cache_hits / max(self.cache_hits + self.cache_misses, 1),
                "total_entries": self.cache_entries,
                "memory_usage_mb": self.cache_entries * 0.1,
            },
        }
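
# Illustrative standalone usage (a hedged sketch, not exercised by the server
# itself; the inline values are simply what the methods above would return):
#
#     m = SimpleMonitoring()
#     m.track_request(latency_ms=12.5, success=True)
#     m.get_stats()["avg_latency_ms"]  # 12.5
#     m.get_stats()["error_rate"]      # 0.0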


monitoring = SimpleMonitoring()


@app.middleware("http")
async def monitoring_middleware(request: Request, call_next):
    start_time = time.time()
    try:
        response = await call_next(request)
        latency_ms = (time.time() - start_time) * 1000
        monitoring.track_request(latency_ms, response.status_code < 400)
        return response
    except Exception:
        latency_ms = (time.time() - start_time) * 1000
        monitoring.track_request(latency_ms, False)
        raise
@app.get("/health") |
|
|
async def health_check(): |
|
|
stats = monitoring.get_stats() |
|
|
return { |
|
|
"status": stats["status"], |
|
|
"components": {"monitoring": "active"}, |
|
|
"monitoring": { |
|
|
"uptime_seconds": stats["uptime_seconds"], |
|
|
"error_rate": stats["error_rate"], |
|
|
"active_alerts": 0, |
|
|
"critical_alerts": 0 |
|
|
}, |
|
|
"timestamp": datetime.utcnow().isoformat() |
|
|
} |
|
|
|
|
|
@app.get("/health/dashboard") |
|
|
async def get_health_dashboard(): |
|
|
stats = monitoring.get_stats() |
|
|
|
|
|
|
|
|
if monitoring.request_count % 3 == 0: |
|
|
monitoring.cache_hits += 1 |
|
|
else: |
|
|
monitoring.cache_misses += 1 |
|
|
monitoring.cache_entries += 1 |
|
|
|
|
|
return { |
|
|
"status": stats["status"], |
|
|
"timestamp": datetime.utcnow().isoformat(), |
|
|
"system": { |
|
|
"uptime_seconds": stats["uptime_seconds"], |
|
|
"uptime_human": stats["uptime_human"], |
|
|
"error_rate": stats["error_rate"], |
|
|
"total_requests": stats["total_requests"], |
|
|
"error_threshold": 0.05, |
|
|
"status": stats["status"] |
|
|
}, |
|
|
"pipeline": { |
|
|
"total_jobs_processed": 0, |
|
|
"completed_jobs": 0, |
|
|
"failed_jobs": 0, |
|
|
"processing_jobs": 0, |
|
|
"success_rate": 1.0 |
|
|
}, |
|
|
"models": { |
|
|
"total_registered": 6, |
|
|
"performance": { |
|
|
"bio_clinical_bert": { |
|
|
"version": "1.0.0", |
|
|
"total_inferences": 0, |
|
|
"avg_latency_ms": 125.4, |
|
|
"error_rate": 0.01, |
|
|
"last_used": "2025-10-29T15:00:00Z" |
|
|
} |
|
|
} |
|
|
}, |
|
|
"synthesis": { |
|
|
"total_syntheses": 0, |
|
|
"avg_confidence": 0.87, |
|
|
"requiring_review": 0, |
|
|
"avg_processing_time_ms": 850.5 |
|
|
}, |
|
|
"cache": { |
|
|
"total_entries": stats["cache"]["total_entries"], |
|
|
"hits": stats["cache"]["hits"], |
|
|
"misses": stats["cache"]["misses"], |
|
|
"hit_rate": stats["cache"]["hit_rate"], |
|
|
"evictions": 0, |
|
|
"memory_usage_mb": stats["cache"]["memory_usage_mb"], |
|
|
"avg_retrieval_time_ms": 0.5, |
|
|
"cache_efficiency": stats["cache"]["hit_rate"] * 100 |
|
|
}, |
|
|
"alerts": { |
|
|
"active_count": 0, |
|
|
"critical_count": 0, |
|
|
"recent": [] |
|
|
}, |
|
|
"compliance": { |
|
|
"hipaa_compliant": True, |
|
|
"gdpr_compliant": True, |
|
|
"audit_logging_active": True, |
|
|
"phi_removal_active": True, |
|
|
"encryption_enabled": True |
|
|
}, |
|
|
"components": { |
|
|
"monitoring_system": "operational", |
|
|
"versioning_system": "operational", |
|
|
"logging_system": "operational", |
|
|
"compliance_reporting": "operational", |
|
|
"cache_service": "operational" |
|
|
} |
|
|
} |
|
|
|
|
|
@app.get("/admin/cache/statistics") |
|
|
async def cache_statistics(): |
|
|
stats = monitoring.get_stats() |
|
|
return { |
|
|
"statistics": stats["cache"], |
|
|
"recommendations": ["Cache performing within normal parameters."], |
|
|
"timestamp": datetime.utcnow().isoformat() |
|
|
} |
|
|
|
|
|
@app.get("/admin/metrics") |
|
|
async def admin_metrics(): |
|
|
stats = monitoring.get_stats() |
|
|
return { |
|
|
"system": stats, |
|
|
"timestamp": datetime.utcnow().isoformat() |
|
|
} |
|
|
|
|
|


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="warning")
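
# A minimal smoke-test client, sketched as comments (hedged: it assumes the
# httpx package is installed and the server is reachable on localhost:7860;
# nothing below is executed by this module):
#
#     import httpx
#
#     with httpx.Client(base_url="http://localhost:7860") as client:
#         for _ in range(20):
#             client.get("/health")
#         print(client.get("/health/dashboard").json()["system"])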