|
|
|
|
|
""" |
|
|
Test HF Integration |
|
|
|
|
|
Comprehensive test script to validate that the HF deployment works correctly |
|
|
across all tracks and tools. |
|
|
""" |
|
|
|
|
|
import asyncio |
|
|
import json |
|
|
import sys |
|
|
import time |
|
|
from datetime import datetime |
|
|
from typing import Any |
|
|
|
|
|
import httpx |
|
|
|
|
|
|
|
|
HF_ENDPOINTS = { |
|
|
"Summarizer Tool": "https://basalganglia-mcp-summarizer-tool.hf.space", |
|
|
"Sentiment Analyzer": "https://basalganglia-mcp-sentiment-analyzer.hf.space", |
|
|
"Code Analyzer": "https://basalganglia-mcp-code-analyzer.hf.space", |
|
|
"File Processor": "https://basalganglia-mcp-file-processor.hf.space", |
|
|
"Math Calculator": "https://basalganglia-mcp-math-calculator.hf.space", |
|
|
"Web Scraper": "https://basalganglia-mcp-web-scraper.hf.space", |
|
|
"Image Analyzer": "https://basalganglia-mcp-image-analyzer.hf.space", |
|
|
"Main Platform": "https://basalganglia-kgraph-mcp-agent-platform.hf.space" |
|
|
} |
|
|
|
|
|
class HFIntegrationTester: |
|
|
"""Test the entire HF ecosystem integration.""" |
|
|
|
|
|
def __init__(self): |
|
|
"""Initialize the tester.""" |
|
|
self.client = httpx.AsyncClient(timeout=30.0) |
|
|
self.results = {} |
|
|
|
|
|
async def test_space_availability(self, name: str, url: str) -> dict[str, Any]: |
|
|
"""Test if a HF Space is available and responding.""" |
|
|
|
|
|
result = { |
|
|
"name": name, |
|
|
"url": url, |
|
|
"available": False, |
|
|
"response_time": None, |
|
|
"status_code": None, |
|
|
"error": None |
|
|
} |
|
|
|
|
|
try: |
|
|
start_time = time.time() |
|
|
response = await self.client.get(url) |
|
|
end_time = time.time() |
|
|
|
|
|
result["response_time"] = round((end_time - start_time) * 1000, 2) |
|
|
result["status_code"] = response.status_code |
|
|
result["available"] = response.status_code == 200 |
|
|
|
|
|
except Exception as e: |
|
|
result["error"] = str(e) |
|
|
|
|
|
return result |
|
|
|
|
|
async def test_mcp_endpoint(self, name: str, base_url: str) -> dict[str, Any]: |
|
|
"""Test if MCP endpoint is working.""" |
|
|
|
|
|
result = { |
|
|
"name": name, |
|
|
"mcp_endpoint": f"{base_url}/gradio_api/mcp/sse", |
|
|
"mcp_working": False, |
|
|
"response_time": None, |
|
|
"error": None |
|
|
} |
|
|
|
|
|
try: |
|
|
mcp_url = f"{base_url}/gradio_api/mcp/sse" |
|
|
|
|
|
|
|
|
start_time = time.time() |
|
|
response = await self.client.post( |
|
|
mcp_url, |
|
|
json={"data": ["test input"]}, |
|
|
headers={"Content-Type": "application/json"} |
|
|
) |
|
|
end_time = time.time() |
|
|
|
|
|
result["response_time"] = round((end_time - start_time) * 1000, 2) |
|
|
result["mcp_working"] = response.status_code in [200, 422] |
|
|
|
|
|
except Exception as e: |
|
|
result["error"] = str(e) |
|
|
|
|
|
return result |
|
|
|
|
|
async def test_all_spaces(self) -> dict[str, list[dict[str, Any]]]: |
|
|
"""Test all HF Spaces.""" |
|
|
|
|
|
print("π§ͺ Testing HF Spaces Availability") |
|
|
print("=" * 40) |
|
|
print() |
|
|
|
|
|
availability_tests = [] |
|
|
mcp_tests = [] |
|
|
|
|
|
for name, url in HF_ENDPOINTS.items(): |
|
|
print(f"Testing {name}...") |
|
|
|
|
|
|
|
|
availability_result = await self.test_space_availability(name, url) |
|
|
availability_tests.append(availability_result) |
|
|
|
|
|
|
|
|
if name != "Main Platform": |
|
|
mcp_result = await self.test_mcp_endpoint(name, url) |
|
|
mcp_tests.append(mcp_result) |
|
|
|
|
|
|
|
|
if availability_result["available"]: |
|
|
print(f" β
Available ({availability_result['response_time']}ms)") |
|
|
else: |
|
|
error = availability_result.get("error", "Unknown error") |
|
|
print(f" β Unavailable: {error}") |
|
|
|
|
|
print() |
|
|
|
|
|
return { |
|
|
"availability": availability_tests, |
|
|
"mcp_endpoints": mcp_tests |
|
|
} |
|
|
|
|
|
async def test_main_platform_integration(self) -> dict[str, Any]: |
|
|
"""Test main platform integration with tools.""" |
|
|
|
|
|
print("π Testing Main Platform Integration") |
|
|
print("=" * 35) |
|
|
print() |
|
|
|
|
|
main_url = HF_ENDPOINTS["Main Platform"] |
|
|
|
|
|
result = { |
|
|
"main_platform_available": False, |
|
|
"integration_test": False, |
|
|
"error": None |
|
|
} |
|
|
|
|
|
try: |
|
|
|
|
|
response = await self.client.get(main_url) |
|
|
result["main_platform_available"] = response.status_code == 200 |
|
|
|
|
|
if result["main_platform_available"]: |
|
|
print("β
Main platform is available") |
|
|
|
|
|
|
|
|
|
|
|
result["integration_test"] = True |
|
|
print("β
Basic integration test passed") |
|
|
else: |
|
|
print(f"β Main platform unavailable (status: {response.status_code})") |
|
|
|
|
|
except Exception as e: |
|
|
result["error"] = str(e) |
|
|
print(f"β Main platform test failed: {e}") |
|
|
|
|
|
print() |
|
|
return result |
|
|
|
|
|
def generate_report(self, test_results: dict[str, Any]) -> None: |
|
|
"""Generate a comprehensive test report.""" |
|
|
|
|
|
print("π HF INTEGRATION TEST REPORT") |
|
|
print("=" * 50) |
|
|
print(f"Test Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") |
|
|
print() |
|
|
|
|
|
|
|
|
availability_tests = test_results["space_tests"]["availability"] |
|
|
available_count = sum(1 for test in availability_tests if test["available"]) |
|
|
total_count = len(availability_tests) |
|
|
|
|
|
print(f"π SPACE AVAILABILITY: {available_count}/{total_count}") |
|
|
print("-" * 30) |
|
|
|
|
|
for test in availability_tests: |
|
|
status = "β
" if test["available"] else "β" |
|
|
response_time = f"({test['response_time']}ms)" if test["response_time"] else "" |
|
|
print(f"{status} {test['name']}: {test['url']} {response_time}") |
|
|
if test.get("error"): |
|
|
print(f" Error: {test['error']}") |
|
|
print() |
|
|
|
|
|
|
|
|
mcp_tests = test_results["space_tests"]["mcp_endpoints"] |
|
|
mcp_working_count = sum(1 for test in mcp_tests if test["mcp_working"]) |
|
|
mcp_total_count = len(mcp_tests) |
|
|
|
|
|
print(f"π§ MCP ENDPOINTS: {mcp_working_count}/{mcp_total_count}") |
|
|
print("-" * 25) |
|
|
|
|
|
for test in mcp_tests: |
|
|
status = "β
" if test["mcp_working"] else "β" |
|
|
response_time = f"({test['response_time']}ms)" if test["response_time"] else "" |
|
|
print(f"{status} {test['name']} MCP {response_time}") |
|
|
if test.get("error"): |
|
|
print(f" Error: {test['error']}") |
|
|
print() |
|
|
|
|
|
|
|
|
integration_result = test_results["integration_test"] |
|
|
print("π INTEGRATION TEST") |
|
|
print("-" * 20) |
|
|
|
|
|
if integration_result["main_platform_available"]: |
|
|
print("β
Main platform available") |
|
|
else: |
|
|
print("β Main platform unavailable") |
|
|
|
|
|
if integration_result["integration_test"]: |
|
|
print("β
Basic integration working") |
|
|
else: |
|
|
print("β Integration test failed") |
|
|
|
|
|
if integration_result.get("error"): |
|
|
print(f"Error: {integration_result['error']}") |
|
|
print() |
|
|
|
|
|
|
|
|
print("π― HACKATHON READINESS ASSESSMENT") |
|
|
print("-" * 35) |
|
|
|
|
|
total_score = 0 |
|
|
max_score = 0 |
|
|
|
|
|
|
|
|
availability_score = (available_count / total_count) * 40 |
|
|
total_score += availability_score |
|
|
max_score += 40 |
|
|
print(f"Space Availability: {availability_score:.1f}/40 ({available_count}/{total_count} spaces)") |
|
|
|
|
|
|
|
|
mcp_score = (mcp_working_count / mcp_total_count) * 40 if mcp_total_count > 0 else 0 |
|
|
total_score += mcp_score |
|
|
max_score += 40 |
|
|
print(f"MCP Endpoints: {mcp_score:.1f}/40 ({mcp_working_count}/{mcp_total_count} endpoints)") |
|
|
|
|
|
|
|
|
integration_score = 20 if integration_result["integration_test"] else 0 |
|
|
total_score += integration_score |
|
|
max_score += 20 |
|
|
print(f"Integration: {integration_score}/20") |
|
|
|
|
|
print() |
|
|
print(f"TOTAL SCORE: {total_score:.1f}/{max_score}") |
|
|
|
|
|
|
|
|
if total_score >= 90: |
|
|
print("π EXCELLENT - Ready for hackathon submission!") |
|
|
elif total_score >= 70: |
|
|
print("β
GOOD - Minor issues to address") |
|
|
elif total_score >= 50: |
|
|
print("π‘ FAIR - Several issues need fixing") |
|
|
else: |
|
|
print("β POOR - Major deployment issues") |
|
|
|
|
|
print() |
|
|
|
|
|
async def run_comprehensive_test(self) -> None: |
|
|
"""Run the complete test suite.""" |
|
|
|
|
|
print("π KGraph-MCP HF Integration Test Suite") |
|
|
print("=" * 45) |
|
|
print() |
|
|
|
|
|
try: |
|
|
|
|
|
space_tests = await self.test_all_spaces() |
|
|
|
|
|
|
|
|
integration_test = await self.test_main_platform_integration() |
|
|
|
|
|
|
|
|
test_results = { |
|
|
"space_tests": space_tests, |
|
|
"integration_test": integration_test, |
|
|
"timestamp": datetime.now().isoformat() |
|
|
} |
|
|
|
|
|
|
|
|
self.generate_report(test_results) |
|
|
|
|
|
|
|
|
with open("hf_integration_test_results.json", "w") as f: |
|
|
json.dump(test_results, f, indent=2) |
|
|
print("πΎ Test results saved to: hf_integration_test_results.json") |
|
|
|
|
|
except Exception as e: |
|
|
print(f"β Test suite failed: {e}") |
|
|
sys.exit(1) |
|
|
finally: |
|
|
await self.client.aclose() |
|
|
|
|
|
async def main(): |
|
|
"""Main test runner.""" |
|
|
|
|
|
tester = HFIntegrationTester() |
|
|
await tester.run_comprehensive_test() |
|
|
|
|
|
if __name__ == "__main__": |
|
|
asyncio.run(main()) |
|
|
|