Integrate Novita AI as exclusive inference provider

- Add Novita AI API integration with the DeepSeek-R1-Distill-Qwen-7B model
- Remove all local model dependencies
- Optimize token allocation for user inputs and context
- Add Anaconda environment setup files
- Add comprehensive test scripts and documentation
927854c
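The test script below pulls all of its Novita settings from `src.config.get_settings()`. As a minimal sketch of the environment it expects, the variables below mirror the settings fields the script prints in TEST 1; the variable names, the base URL, and the model ID are assumptions for illustration (Novita exposes an OpenAI-compatible endpoint), not values confirmed by this commit:

    # .env (illustrative values only)
    NOVITA_API_KEY=sk-...                              # required; the script aborts without it
    NOVITA_BASE_URL=https://api.novita.ai/v3/openai    # assumed OpenAI-compatible endpoint
    NOVITA_MODEL=deepseek/deepseek-r1-distill-qwen-7b  # assumed ID for DeepSeek-R1-Distill-Qwen-7B
    DEEPSEEK_R1_TEMPERATURE=0.6
    DEEPSEEK_R1_FORCE_REASONING=true
    USER_INPUT_MAX_TOKENS=2048                         # illustrative token budget
    CONTEXT_PREPARATION_BUDGET=4096                    # illustrative token budget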
#!/usr/bin/env python3
"""
Test script for Novita AI API connection
Tests configuration, client initialization, and API calls
"""
import os
import sys
import asyncio
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))


def test_configuration():
    """Test configuration loading"""
    print("=" * 60)
    print("TEST 1: Configuration Loading")
    print("=" * 60)
    try:
        from src.config import get_settings
        settings = get_settings()
        print("✓ Configuration loaded successfully")
        print(f"  Novita API Key: {'Set' if settings.novita_api_key else 'NOT SET'}")
        print(f"  Base URL: {settings.novita_base_url}")
        print(f"  Model: {settings.novita_model}")
        print(f"  Temperature: {settings.deepseek_r1_temperature}")
        print(f"  Force Reasoning: {settings.deepseek_r1_force_reasoning}")
        print(f"  User Input Max Tokens: {settings.user_input_max_tokens}")
        print(f"  Context Preparation Budget: {settings.context_preparation_budget}")
        if not settings.novita_api_key:
            print("\n❌ ERROR: NOVITA_API_KEY is not set!")
            print("   Please set it in environment variables or a .env file")
            return False
        return True
    except Exception as e:
        print(f"❌ Configuration loading failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def test_openai_package():
    """Test OpenAI package availability"""
    print("\n" + "=" * 60)
    print("TEST 2: OpenAI Package Check")
    print("=" * 60)
    try:
        import openai  # the package itself, for version reporting
        from openai import OpenAI  # noqa: F401 - confirm the client class is importable
        print("✓ OpenAI package is available")
        print(f"  OpenAI version: {openai.__version__}")
        return True
    except ImportError as e:
        print(f"❌ OpenAI package not available: {e}")
        print("   Install with: pip install 'openai>=1.0.0'")
        return False


def test_client_initialization():
    """Test Novita AI client initialization"""
    print("\n" + "=" * 60)
    print("TEST 3: Novita AI Client Initialization")
    print("=" * 60)
    try:
        from src.config import get_settings
        from openai import OpenAI
        settings = get_settings()
        if not settings.novita_api_key:
            print("❌ Cannot test - NOVITA_API_KEY not set")
            return False, None
        client = OpenAI(
            base_url=settings.novita_base_url,
            api_key=settings.novita_api_key,
        )
        print("✓ Novita AI client initialized successfully")
        print(f"  Base URL: {settings.novita_base_url}")
        # Print only a masked form of the key; short keys are hidden entirely
        masked = (
            f"{settings.novita_api_key[:10]}...{settings.novita_api_key[-4:]}"
            if len(settings.novita_api_key) > 14
            else "***"
        )
        print(f"  API Key: {masked}")
        return True, client
    except Exception as e:
        print(f"❌ Client initialization failed: {e}")
        import traceback
        traceback.print_exc()
        return False, None


def test_simple_api_call(client):
    """Test a simple API call to Novita AI"""
    print("\n" + "=" * 60)
    print("TEST 4: Simple API Call")
    print("=" * 60)
    if not client:
        print("❌ Cannot test - client not initialized")
        return False
    try:
        from src.config import get_settings
        settings = get_settings()
        print(f"Sending test request to: {settings.novita_model}")
        print("Prompt: 'Hello, this is a test. Please respond briefly.'")
        response = client.chat.completions.create(
            model=settings.novita_model,
            messages=[
                {"role": "user", "content": "Hello, this is a test. Please respond briefly."}
            ],
            max_tokens=50,
            temperature=0.6,
        )
        # message.content can be None, so guard before measuring it
        if response.choices and response.choices[0].message.content:
            result = response.choices[0].message.content
            print("✓ API call successful!")
            print(f"  Response length: {len(result)} characters")
            print(f"  Response preview: {result[:100]}...")
            print(f"  Model used: {getattr(response, 'model', 'N/A')}")
            return True
        else:
            print("❌ API call returned empty response")
            return False
    except Exception as e:
        print(f"❌ API call failed: {e}")
        import traceback
        traceback.print_exc()
        return False
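

# --- Illustrative helper (not part of the original script) ---
# DeepSeek-R1 distill models typically emit chain-of-thought wrapped in
# <think>...</think> tags before the final answer; the tag format is an
# assumption about the model's output, not something this commit confirms.
# A sketch like this could strip the reasoning block when only the final
# answer should be shown or measured.
import re

def strip_reasoning(text: str) -> str:
    """Remove a leading <think>...</think> block, if present."""
    return re.sub(r"<think>.*?</think>\s*", "", text, count=1, flags=re.DOTALL)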


def test_llm_router():
    """Test LLM Router initialization and health check"""
    print("\n" + "=" * 60)
    print("TEST 5: LLM Router Initialization")
    print("=" * 60)
    try:
        from src.llm_router import LLMRouter
        print("Initializing LLM Router...")
        router = LLMRouter(hf_token=None, use_local_models=False)
        print("✓ LLM Router initialized successfully")
        # Run the async health check from this synchronous test
        print("\nTesting health check...")
        health = asyncio.run(router.health_check())
        print(f"✓ Health check result: {health}")
        return True
    except Exception as e:
        print(f"❌ LLM Router initialization failed: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_inference():
    """Test actual inference through LLM Router"""
    print("\n" + "=" * 60)
    print("TEST 6: Inference Test")
    print("=" * 60)
    try:
        from src.llm_router import LLMRouter
        router = LLMRouter(hf_token=None, use_local_models=False)
        test_prompt = "What is the capital of France? Answer in one sentence."
        print(f"Test prompt: {test_prompt}")
        result = await router.route_inference(
            task_type="general_reasoning",
            prompt=test_prompt,
            max_tokens=100,
            temperature=0.6,
        )
        if result:
            print("✓ Inference successful!")
            print(f"  Response length: {len(result)} characters")
            print(f"  Response: {result}")
            return True
        else:
            print("❌ Inference returned None")
            return False
    except Exception as e:
        print(f"❌ Inference test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def main():
    """Run all tests"""
    print("\n" + "=" * 60)
    print("NOVITA AI CONNECTION TEST")
    print("=" * 60)
    print()
    results = {}

    # Test 1: Configuration
    results['config'] = test_configuration()
    if not results['config']:
        print("\n❌ Configuration test failed. Please check your environment variables.")
        return 1

    # Test 2: OpenAI package
    results['package'] = test_openai_package()
    if not results['package']:
        print("\n❌ Package test failed. Please install: pip install 'openai>=1.0.0'")
        return 1

    # Test 3: Client initialization (always returns a (bool, client) tuple)
    results['client'], client = test_client_initialization()
    if not results['client']:
        print("\n❌ Client initialization failed. Check your API key and base URL.")
        return 1

    # Test 4: Simple API call
    results['api_call'] = test_simple_api_call(client)

    # Test 5: LLM Router
    results['router'] = test_llm_router()

    # Test 6: Inference (counted as failed if the router never came up)
    if results['router']:
        results['inference'] = asyncio.run(test_inference())
    else:
        results['inference'] = False

    # Summary
    print("\n" + "=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)
    total_tests = len(results)
    passed_tests = sum(1 for v in results.values() if v)
    for test_name, result in results.items():
        status = "✓ PASS" if result else "❌ FAIL"
        print(f"  {test_name.upper()}: {status}")
    print(f"\nTotal: {passed_tests}/{total_tests} tests passed")
    if passed_tests == total_tests:
        print("\n🎉 All tests passed! Novita AI connection is working correctly.")
        return 0
    else:
        print("\n⚠️ Some tests failed. Please review the errors above.")
        return 1


if __name__ == "__main__":
    exit_code = main()
    sys.exit(exit_code)
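Assuming the script is saved as test_novita_connection.py at the project root (the filename is illustrative; the commit does not pin one down), a typical run looks like:

    export NOVITA_API_KEY=sk-...
    python test_novita_connection.py

The process exits 0 only when all six tests pass, so the script can double as a CI smoke test for the Novita integration.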