#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import random
import asyncio
import aiohttp
import time
import logging
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional

class PerformanceTester:
    """Benchmark an LLM completion endpoint across concurrency levels.

    Fires batches of prompts at the configured HTTP endpoint, one batch per
    concurrency level, collects per-request latency/size metrics, and writes
    JSON results, charts and a text report under ./results (logs under ./logs).
    """

    def __init__(self, config_path: str = "config.json", questions_path: str = "questions.json"):
        """Initialize the performance tester with configuration and questions.

        Args:
            config_path: JSON file expected to provide "url", "model",
                "max_tokens", "temperature", "concurrency" (list of levels)
                and optionally "api_key".
            questions_path: JSON file holding a list of prompt strings.

        Raises:
            Exception: If either file is missing or is not valid JSON.
        """
        self.config = self._load_config(config_path)
        self.questions = self._load_questions(questions_path)
        self.results: List[Dict[str, Any]] = []
        # One timestamp for the whole run so intermediate saves overwrite a
        # single results file (and charts/report share its name) instead of
        # creating a new file on every save.
        self.run_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.setup_logging()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load configuration from JSON file."""
        return self._load_json(config_path, "Config", "config")

    def _load_questions(self, questions_path: str) -> List[str]:
        """Load questions from JSON file."""
        return self._load_json(questions_path, "Questions", "questions")

    @staticmethod
    def _load_json(path: str, title: str, noun: str) -> Any:
        """Parse a JSON file, re-raising failures with a descriptive message.

        Args:
            path: File to read.
            title: Capitalized label used in the "not found" message.
            noun: Lowercase label used in the "invalid JSON" message.

        Raises:
            Exception: On a missing file or malformed JSON, chained to the
                original exception so the traceback keeps the cause.
        """
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except FileNotFoundError as e:
            raise Exception(f"{title} file {path} not found") from e
        except json.JSONDecodeError as e:
            raise Exception(f"Invalid JSON in {noun} file {path}") from e

    def setup_logging(self):
        """Configure run-scoped logging to both a file and the console."""
        log_dir = Path("logs")
        log_dir.mkdir(exist_ok=True)

        log_file = log_dir / f"performance_test_{self.run_timestamp}.log"

        # NOTE: basicConfig is a no-op if the root logger was already
        # configured elsewhere in the process.
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    async def make_request(self, session: aiohttp.ClientSession, question: str, request_id: int) -> Dict[str, Any]:
        """Issue one completion request and time it end to end.

        Args:
            session: Shared aiohttp session.
            question: Prompt text to send.
            request_id: 1-based id used only for logging.

        Returns:
            Dict with request_id, question, response_time (seconds, or None
            when the request failed before completing), char_count, success
            flag and start_time (epoch seconds). Never raises: all errors are
            logged and reported via success=False.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.config.get('api_key', '')}"
        }

        payload = {
            "model": self.config["model"],
            "prompt": question,
            "max_tokens": self.config["max_tokens"],
            "temperature": self.config["temperature"],
            "stream": False
        }

        start_time = time.time()
        response_time = None
        char_count = 0
        success = False

        try:
            async with session.post(
                self.config["url"],
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=300)
            ) as response:
                if response.status == 200:
                    result = await response.json()
                    # BUG FIX: stop the clock only after the body has been
                    # downloaded and parsed; stopping at the response headers
                    # under-reports the real request latency.
                    response_time = time.time() - start_time
                    # Support both OpenAI-style ("choices"[0]."text") and
                    # plain ("response") payload shapes.
                    content = result.get('choices', [{}])[0].get('text', '') if 'choices' in result else result.get('response', '')
                    char_count = len(content)
                    success = True
                    self.logger.info(f"Request {request_id}: Success - {response_time:.2f}s, {char_count} chars")
                else:
                    error_text = await response.text()
                    response_time = time.time() - start_time
                    self.logger.error(f"Request {request_id}: HTTP {response.status} - {error_text}")

        except asyncio.TimeoutError:
            self.logger.error(f"Request {request_id}: Timeout after {time.time() - start_time:.2f}s")
        except aiohttp.ClientError as e:
            self.logger.error(f"Request {request_id}: Client error - {str(e)}")
        except Exception as e:
            # Last-resort catch so one bad request never aborts the batch.
            self.logger.error(f"Request {request_id}: Unexpected error - {str(e)}")

        return {
            "request_id": request_id,
            "question": question,
            "response_time": response_time,
            "char_count": char_count,
            "success": success,
            "start_time": start_time
        }

    async def run_concurrency_test(self, concurrency_level: int, num_requests: int = 50) -> Dict[str, Any]:
        """Run one test batch at the given concurrency level.

        Args:
            concurrency_level: Maximum number of in-flight requests.
            num_requests: Total number of requests to issue.

        Returns:
            Aggregate statistics for the batch (success rate, latency
            min/avg/max, character counts, throughput).
        """
        self.logger.info(f"Starting concurrency test with {concurrency_level} concurrent requests")

        semaphore = asyncio.Semaphore(concurrency_level)
        connector = aiohttp.TCPConnector(limit=concurrency_level)

        async def limited_request(session: aiohttp.ClientSession, question: str, request_id: int):
            # Gate each request on the semaphore so that at most
            # `concurrency_level` requests are ever in flight at once.
            async with semaphore:
                return await self.make_request(session, question, request_id)

        batch_start = time.time()
        async with aiohttp.ClientSession(connector=connector) as session:
            tasks = [
                limited_request(session, random.choice(self.questions), i + 1)
                for i in range(num_requests)
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)
        wall_time = time.time() - batch_start

        request_results = []
        for result in results:
            # BaseException also catches CancelledError, which gather()
            # returns as a result when return_exceptions=True.
            if isinstance(result, BaseException):
                self.logger.error(f"Task failed with exception: {result}")
                # Placeholder carries the same keys as a real result so the
                # saved JSON stays uniform.
                request_results.append({
                    "request_id": None,
                    "question": None,
                    "response_time": None,
                    "char_count": 0,
                    "success": False,
                    "start_time": None
                })
            else:
                request_results.append(result)

        successful_requests = [r for r in request_results if r["success"] and r["response_time"] is not None]
        failed_requests = len(request_results) - len(successful_requests)

        # Start from the all-zero shape so the failure case needs no
        # duplicated dict literal.
        stats = {
            "concurrency_level": concurrency_level,
            "total_requests": num_requests,
            "successful_requests": len(successful_requests),
            "failed_requests": failed_requests,
            "success_rate": len(successful_requests) / num_requests if num_requests else 0,
            "avg_response_time": 0,
            "min_response_time": 0,
            "max_response_time": 0,
            "avg_chars_per_response": 0,
            "total_chars": 0,
            "throughput": 0
        }

        if successful_requests:
            response_times = [r["response_time"] for r in successful_requests]
            char_counts = [r["char_count"] for r in successful_requests]
            stats.update({
                "avg_response_time": sum(response_times) / len(response_times),
                "min_response_time": min(response_times),
                "max_response_time": max(response_times),
                "avg_chars_per_response": sum(char_counts) / len(char_counts),
                "total_chars": sum(char_counts),
                # BUG FIX: throughput is completed requests per second of
                # wall-clock time. Dividing by the *sum* of per-request
                # latencies under-reports throughput whenever requests
                # overlap, which is the whole point of a concurrency test.
                "throughput": len(successful_requests) / wall_time if wall_time > 0 else 0
            })

        self.logger.info(f"Concurrency {concurrency_level} results: {stats}")
        return stats

    async def run_all_tests(self):
        """Run one batch per configured concurrency level, saving as we go."""
        self.logger.info("Starting all performance tests")

        for concurrency in self.config["concurrency"]:
            test_results = await self.run_concurrency_test(concurrency)
            self.results.append(test_results)

            # Persist after every level so a crash loses at most one batch.
            self.save_results()

            # Give the server a moment to drain between levels.
            await asyncio.sleep(2)

    def save_results(self):
        """Write accumulated stats to results/performance_results_<run>.json.

        Uses the run-wide timestamp, so repeated calls during one run
        overwrite the same file rather than accumulating copies.
        """
        result_dir = Path("results")
        result_dir.mkdir(exist_ok=True)

        result_file = result_dir / f"performance_results_{self.run_timestamp}.json"

        with open(result_file, 'w', encoding='utf-8') as f:
            json.dump(self.results, f, indent=2, ensure_ascii=False)

        self.logger.info(f"Results saved to {result_file}")

    def generate_charts(self):
        """Render the four summary charts plus the companion text report."""
        if not self.results:
            self.logger.warning("No results available for chart generation")
            return

        result_dir = Path("results")
        result_dir.mkdir(exist_ok=True)

        # One series per metric, all indexed by concurrency level.
        concurrency_levels = [r["concurrency_level"] for r in self.results]
        avg_response_times = [r["avg_response_time"] for r in self.results]
        success_rates = [r["success_rate"] * 100 for r in self.results]  # Convert to percentage
        throughput = [r["throughput"] for r in self.results]
        avg_chars = [r["avg_chars_per_response"] for r in self.results]

        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('AI Model Performance Test Results', fontsize=16, fontweight='bold')

        # (axis, series, line style, y-label, title) for the four panels.
        panels = [
            (ax1, avg_response_times, 'bo-', 'Average Response Time (seconds)', 'Response Time vs Concurrency'),
            (ax2, success_rates, 'go-', 'Success Rate (%)', 'Success Rate vs Concurrency'),
            (ax3, throughput, 'ro-', 'Throughput (requests/second)', 'Throughput vs Concurrency'),
            (ax4, avg_chars, 'mo-', 'Average Characters per Response', 'Response Length vs Concurrency'),
        ]
        for ax, series, line_style, ylabel, title in panels:
            ax.plot(concurrency_levels, series, line_style, linewidth=2, markersize=8)
            ax.set_xlabel('Concurrency Level')
            ax.set_ylabel(ylabel)
            ax.set_title(title)
            ax.grid(True, alpha=0.3)
        ax2.set_ylim(0, 105)  # percentage axis with a little headroom

        plt.tight_layout()
        chart_file = result_dir / f"performance_charts_{self.run_timestamp}.png"
        plt.savefig(chart_file, dpi=300, bbox_inches='tight')
        plt.close(fig)

        self.logger.info(f"Charts saved to {chart_file}")

        # Companion human-readable report, sharing the run timestamp.
        self.generate_report(self.run_timestamp)

    def generate_report(self, timestamp: str):
        """Write a plain-text summary of all batches to the results directory.

        Args:
            timestamp: Stamp embedded in the report file name (callers pass
                the run-wide timestamp so all artifacts share a name).
        """
        result_dir = Path("results")
        result_dir.mkdir(exist_ok=True)
        report_file = result_dir / f"performance_report_{timestamp}.txt"

        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("AI Model Performance Test Report\n")
            f.write("=" * 50 + "\n\n")
            f.write(f"Test Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"Model: {self.config['model']}\n")
            f.write(f"API Endpoint: {self.config['url']}\n\n")

            f.write("Test Configuration:\n")
            f.write(f"- Max Tokens: {self.config['max_tokens']}\n")
            f.write(f"- Temperature: {self.config['temperature']}\n")
            f.write(f"- Concurrency Levels Tested: {self.config['concurrency']}\n\n")

            f.write("Results Summary:\n")
            f.write("=" * 50 + "\n")

            for result in self.results:
                f.write(f"\nConcurrency Level: {result['concurrency_level']}\n")
                f.write(f"  Total Requests: {result['total_requests']}\n")
                f.write(f"  Successful: {result['successful_requests']}\n")
                f.write(f"  Failed: {result['failed_requests']}\n")
                f.write(f"  Success Rate: {result['success_rate']:.2%}\n")
                f.write(f"  Avg Response Time: {result['avg_response_time']:.2f}s\n")
                f.write(f"  Min Response Time: {result['min_response_time']:.2f}s\n")
                f.write(f"  Max Response Time: {result['max_response_time']:.2f}s\n")
                f.write(f"  Avg Chars/Response: {result['avg_chars_per_response']:.0f}\n")
                f.write(f"  Throughput: {result['throughput']:.2f} req/s\n")

        self.logger.info(f"Report saved to {report_file}")

async def main():
    """Run the full performance-test suite and report the outcome.

    Any failure is logged and re-raised so the process exits non-zero.
    """
    try:
        tester = PerformanceTester()
        await tester.run_all_tests()
        tester.generate_charts()
    except Exception as exc:
        logging.error(f"Performance test failed: {str(exc)}")
        raise
    else:
        tester.logger.info("All performance tests completed successfully!")

# Script entry point: start the asyncio event loop and run the test suite.
if __name__ == "__main__":
    asyncio.run(main())