#!/usr/bin/env python3
"""
Basic benchmark example - Test vLLM and Ollama performance.

This example demonstrates how to use the vllm-benchmark library
to compare vLLM and Ollama performance on hydrology domain questions.
"""

import asyncio
import sys
sys.path.insert(0, '..')

from src.config import VLLMConfig, OllamaConfig, TestConfig
from src.clients import VLLMClient, OllamaClient
from src.benchmark import BenchmarkRunner, ResultReporter


async def main(*, interactive: bool = False) -> None:
    """Run a basic benchmark comparing vLLM and Ollama.

    Builds both backend clients from their default configs, runs the full
    benchmark suite at a few concurrency levels, and prints a comparison
    report to stdout.

    Args:
        interactive: When True, pause for an Enter keypress (Ctrl+C to
            cancel) before the benchmark starts. Defaults to False so
            scripted runs are not blocked.
    """
    banner = "=" * 80
    print(banner)
    print("🌊 vLLM vs Ollama Benchmark - Hydrology Domain")
    print(banner)

    # Configure clients (all settings come from the config-class defaults).
    vllm_config = VLLMConfig()
    ollama_config = OllamaConfig()

    vllm_client = VLLMClient(vllm_config)
    ollama_client = OllamaClient(ollama_config)

    # Configure test parameters
    test_config = TestConfig(
        concurrent_levels=[1, 5, 10],  # Test with fewer levels for quick demo
        max_tokens=512,
        temperature=0.7
    )

    # Run benchmark
    runner = BenchmarkRunner(test_config)

    print("\n⚠️  Note: This will automatically stop vLLM and start Ollama")
    print("   Ensure vLLM is running before starting!")
    # Only advertise the pause when we actually wait for input. The original
    # printed the prompt unconditionally while input() was commented out,
    # which misled users into thinking the script would wait.
    if interactive:
        print("\nPress Enter to continue or Ctrl+C to cancel...")
        input()

    results = await runner.run_full_benchmark(
        vllm_client,
        ollama_client,
        switch_services=True  # Automatically manage service lifecycle
    )

    # Generate report
    reporter = ResultReporter()
    reporter.print_full_report(results)


if __name__ == "__main__":
    # Script entry point: drive the async benchmark on a fresh event loop.
    asyncio.run(main())

