"""Benchmark runner for executing concurrent load tests."""

import asyncio
import time
import statistics
from typing import List, Optional
import aiohttp

from ..config import (
    RequestResult,
    BenchmarkResult,
    HYDROLOGY_QUESTIONS,
    TestConfig,
    DEFAULT_TEST_CONFIG
)
from ..clients.base import BaseLLMClient


class BenchmarkRunner:
    """Executes benchmark tests for LLM backends.

    Drives concurrent request load against a client, aggregates latency /
    throughput statistics into ``BenchmarkResult`` objects, and prints a
    human-readable summary for each run.
    """

    def __init__(self, test_config: Optional[TestConfig] = None):
        """
        Initialize the benchmark runner.

        Args:
            test_config: TestConfig instance. Uses DEFAULT_TEST_CONFIG if None.
        """
        self.test_config = test_config or DEFAULT_TEST_CONFIG

    async def run_concurrent_requests(
        self,
        client: BaseLLMClient,
        questions: List[str],
        concurrent_level: int
    ) -> List[RequestResult]:
        """
        Execute concurrent requests with controlled parallelism.

        A semaphore caps the number of in-flight requests at
        ``concurrent_level``. Unlike fixed-size batching (gather per batch),
        this keeps the pipeline full: as soon as one request completes the
        next starts, so the run never stalls on the slowest request of a
        batch while still respecting the concurrency limit.

        Args:
            client: LLM client instance
            questions: List of questions to ask
            concurrent_level: Maximum number of simultaneous requests

        Returns:
            List of RequestResult objects, in the same order as `questions`
            (asyncio.gather preserves input order).
        """
        semaphore = asyncio.Semaphore(concurrent_level)

        async with aiohttp.ClientSession() as session:

            async def bounded_generate(question: str) -> RequestResult:
                # Hold a semaphore slot for the duration of the request so
                # at most `concurrent_level` requests are in flight at once.
                async with semaphore:
                    return await client.generate(
                        session,
                        question,
                        max_tokens=self.test_config.max_tokens,
                        temperature=self.test_config.temperature
                    )

            return list(await asyncio.gather(
                *(bounded_generate(q) for q in questions)
            ))

    @staticmethod
    def _percentile(values: List[float], index: int) -> float:
        """Return the Pxx percentile of `values` for integer `index` (e.g. 95).

        Falls back to the single sample when fewer than two values exist,
        since statistics.quantiles requires at least two data points.
        """
        if len(values) < 2:
            return values[0]
        # quantiles(n=100) yields 99 cut points; cut point k sits at
        # probability (k+1)/100, so P95 is index 94 and P99 is index 98.
        return statistics.quantiles(values, n=100)[index - 1]

    async def benchmark_backend(
        self,
        client: BaseLLMClient,
        concurrent_level: int,
        backend_name: Optional[str] = None
    ) -> Optional[BenchmarkResult]:
        """
        Run benchmark for a single backend at given concurrency.

        Args:
            client: LLM client instance
            concurrent_level: Number of concurrent requests
            backend_name: Optional backend name override

        Returns:
            BenchmarkResult or None if all requests failed
        """
        backend_name = backend_name or client.get_backend_name()

        print(f"\n{'='*60}")
        print(f"Testing {backend_name} - Concurrency: {concurrent_level}")
        print(f"{'='*60}")

        # Use 3x the concurrency level worth of requests, capped at the size
        # of the question pool, so each request gets a distinct question.
        # NOTE(review): because of the min() cap, tiling the pool is never
        # needed — a plain slice is equivalent to the former tile-and-slice.
        num_requests = min(len(HYDROLOGY_QUESTIONS), concurrent_level * 3)
        questions = HYDROLOGY_QUESTIONS[:num_requests]

        # perf_counter is monotonic — immune to wall-clock adjustments that
        # would corrupt a duration measured with time.time().
        start_time = time.perf_counter()
        results = await self.run_concurrent_requests(client, questions, concurrent_level)
        total_time = time.perf_counter() - start_time

        # Partition outcomes for stats and error reporting.
        successful = [r for r in results if r.success]
        failed = [r for r in results if not r.success]

        if not successful:
            print(f"❌ All requests failed!")
            for r in failed[:3]:
                print(f"   Error: {r.error}")
            return None

        latencies = [r.latency for r in successful]
        tokens = [r.tokens for r in successful]

        benchmark = BenchmarkResult(
            backend=backend_name,
            concurrent_level=concurrent_level,
            total_requests=len(results),
            successful_requests=len(successful),
            failed_requests=len(failed),
            total_time=total_time,
            # Guard against a pathological zero-duration run.
            throughput=len(successful) / total_time if total_time > 0 else 0,
            avg_latency=statistics.mean(latencies),
            p50_latency=statistics.median(latencies),
            p95_latency=self._percentile(latencies, 95),
            p99_latency=self._percentile(latencies, 99),
            tokens_per_second=sum(tokens) / total_time if total_time > 0 else 0,
        )

        self._print_results(benchmark, failed)

        return benchmark

    def _print_results(self, benchmark: BenchmarkResult, failed: List[RequestResult]):
        """Print benchmark results (and a sample of failures) to console."""
        print(f"\n📊 Results:")
        print(f"   ✅ Success: {benchmark.successful_requests}/{benchmark.total_requests}")
        print(f"   ⏱️  Total Time: {benchmark.total_time:.2f}s")
        print(f"   🚀 Throughput: {benchmark.throughput:.2f} req/s")
        print(f"   📈 Token Rate: {benchmark.tokens_per_second:.2f} tokens/s")
        print(f"   ⏰ Avg Latency: {benchmark.avg_latency:.2f}s")
        print(f"   📊 P50 Latency: {benchmark.p50_latency:.2f}s")
        print(f"   📊 P95 Latency: {benchmark.p95_latency:.2f}s")
        print(f"   📊 P99 Latency: {benchmark.p99_latency:.2f}s")

        if failed:
            print(f"\n   ⚠️  Failed Requests: {len(failed)}")
            for r in failed[:2]:
                # `error` may be None on some failure paths; avoid slicing None.
                print(f"      - {(r.error or 'unknown error')[:100]}")

    async def _benchmark_all_levels(
        self,
        client: BaseLLMClient,
        backend_name: str
    ) -> List[BenchmarkResult]:
        """Benchmark one backend at every configured concurrency level.

        A failure at one level is logged and skipped so the remaining levels
        still run. Returns only the successful BenchmarkResults.
        """
        collected: List[BenchmarkResult] = []
        for concurrent_level in self.test_config.concurrent_levels:
            print(f"\n{'#'*80}")
            print(f"# {backend_name} - Concurrency: {concurrent_level}")
            print(f"{'#'*80}")

            try:
                result = await self.benchmark_backend(client, concurrent_level, backend_name)
                if result:
                    collected.append(result)
            except Exception as e:
                print(f"\n❌ {backend_name} test failed: {e}")

            # Brief cool-down so the backend settles before the next level.
            await asyncio.sleep(2)
        return collected

    async def run_full_benchmark(
        self,
        vllm_client: BaseLLMClient,
        ollama_client: BaseLLMClient,
        switch_services: bool = True
    ) -> List[BenchmarkResult]:
        """
        Run full benchmark suite for both backends.

        Args:
            vllm_client: vLLM client instance
            ollama_client: Ollama client instance
            switch_services: Whether to automatically stop/start services

        Returns:
            List of all BenchmarkResult objects
        """
        all_results: List[BenchmarkResult] = []

        # Phase 1: Test vLLM
        print("\n" + "="*80)
        print("Phase 1/2: Testing vLLM")
        print("="*80)
        all_results.extend(await self._benchmark_all_levels(vllm_client, "vLLM"))

        # Switch services if requested (vLLM and Ollama contend for the GPU).
        if switch_services:
            from ..utils.service_manager import ServiceManager
            manager = ServiceManager()

            print("\n" + "="*80)
            print("🔄 Switching Services: vLLM → Ollama")
            print("="*80)

            manager.stop_vllm()
            manager.start_ollama()

            print("\n⏳ Waiting for Ollama to start (10s)...")
            await asyncio.sleep(10)

        # Phase 2: Test Ollama
        print("\n" + "="*80)
        print("Phase 2/2: Testing Ollama")
        print("="*80)
        all_results.extend(await self._benchmark_all_levels(ollama_client, "Ollama"))

        return all_results

