#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import random
import asyncio
import aiohttp
import time
import logging
import math
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional

class StressTester:
    """Incremental concurrency stress tester for an LLM completion endpoint.

    Walks the concurrency levels listed in the config, fires that many
    simultaneous completion requests per level, and records latency, output
    size, and failure-rate statistics. Testing stops at the first level whose
    success rate falls below the configured threshold.
    """

    def __init__(self, config_path: str = "config.json", questions_path: str = "questions.json"):
        """Initialize the stress tester with configuration and questions.

        Args:
            config_path: Path to the JSON config (url, model, thresholds, ...).
            questions_path: Path to a JSON array of prompt strings.

        Raises:
            Exception: If either file is missing or contains invalid JSON.
        """
        self.config = self._load_config(config_path)
        self.questions = self._load_questions(questions_path)
        self.results: List[Dict[str, Any]] = []
        self.max_successful_concurrency = 0
        self.first_failed_concurrency: Optional[int] = None
        # One timestamp per run: the log, results JSON, and report all share
        # it, so repeated save_results() calls overwrite a single file instead
        # of creating a new one per concurrency level.
        self.run_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.setup_logging()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load configuration from a JSON file, filling in default thresholds."""
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
        except FileNotFoundError as err:
            raise Exception(f"Config file {config_path} not found") from err
        except json.JSONDecodeError as err:
            raise Exception(f"Invalid JSON in config file {config_path}") from err
        # Set default values if not specified
        config.setdefault('concurrency_levels', [1, 2, 3, 4, 5, 6])
        config.setdefault('test_timeout_seconds', 600)
        config.setdefault('success_rate_threshold', 0.9)
        config.setdefault('failure_rate_threshold', 0.1)
        return config

    def _load_questions(self, questions_path: str) -> List[str]:
        """Load the list of prompt strings from a JSON file."""
        try:
            with open(questions_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except FileNotFoundError as err:
            raise Exception(f"Questions file {questions_path} not found") from err
        except json.JSONDecodeError as err:
            raise Exception(f"Invalid JSON in questions file {questions_path}") from err

    def setup_logging(self):
        """Configure run-scoped logging to a timestamped file plus the console."""
        log_dir = Path("logs")
        log_dir.mkdir(exist_ok=True)

        log_file = log_dir / f"stress_test_{self.run_timestamp}.log"

        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    async def make_request(self, session: "aiohttp.ClientSession", question: str, request_id: int) -> Dict[str, Any]:
        """Send one completion request and time it.

        Args:
            session: Shared aiohttp client session.
            question: Prompt text to send.
            request_id: 1-based id used only for log correlation.

        Returns:
            Dict with request_id, question, response_time (seconds, or None
            if no response arrived), char_count, success flag, and start_time
            (time.time() at dispatch).
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.config.get('api_key', '')}"
        }

        payload = {
            "model": self.config["model"],
            "prompt": question,
            "max_tokens": self.config["max_tokens"],
            "temperature": self.config["temperature"],
            "stream": False
        }

        start_time = time.time()
        response_time = None
        char_count = 0
        success = False

        try:
            async with session.post(
                self.config["url"],
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=self.config['test_timeout_seconds'])
            ) as response:
                response_time = time.time() - start_time

                if response.status == 200:
                    result = await response.json()
                    # Completion-style APIs put text at choices[0].text; fall
                    # back to a bare "response" field otherwise. Guard against
                    # an empty/null "choices" value: the previous indexing
                    # raised and turned a 200 reply into a logged failure.
                    choices = result.get('choices')
                    if choices:
                        content = choices[0].get('text', '')
                    else:
                        content = result.get('response', '')
                    char_count = len(content)
                    success = True
                    self.logger.info(f"Request {request_id}: Success - {response_time:.2f}s, {char_count} chars")
                else:
                    error_text = await response.text()
                    self.logger.error(f"Request {request_id}: HTTP {response.status} - {error_text}")

        except asyncio.TimeoutError:
            self.logger.error(f"Request {request_id}: Timeout after {self.config['test_timeout_seconds']}s")
        except aiohttp.ClientError as e:
            self.logger.error(f"Request {request_id}: Client error - {str(e)}")
        except Exception as e:
            self.logger.error(f"Request {request_id}: Unexpected error - {str(e)}")

        return {
            "request_id": request_id,
            "question": question,
            "response_time": response_time,
            "char_count": char_count,
            "success": success,
            "start_time": start_time
        }

    async def test_concurrency_level(self, concurrency_level: int) -> Dict[str, Any]:
        """Fire ``concurrency_level`` simultaneous requests and summarize them.

        Exactly one request per concurrency slot is issued, so the connector
        limit alone bounds the in-flight count; the previous same-sized
        semaphore wrapper was a no-op and has been removed.

        Returns:
            Stats dict: request counts, failure_rate, latency min/avg/max,
            character totals, throughput, and whether the failure rate stayed
            within the configured threshold.
        """
        self.logger.info(f"Testing concurrency level: {concurrency_level}")

        num_requests = concurrency_level
        connector = aiohttp.TCPConnector(limit=concurrency_level)

        async with aiohttp.ClientSession(connector=connector) as session:
            # One task per request; gather with return_exceptions so a single
            # crashed task cannot abort the whole level.
            tasks = [
                self.make_request(session, random.choice(self.questions), i + 1)
                for i in range(num_requests)
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)

            request_results = []
            for result in results:
                if isinstance(result, Exception):
                    self.logger.error(f"Task failed with exception: {result}")
                    request_results.append({
                        "success": False,
                        "response_time": None,
                        "char_count": 0
                    })
                else:
                    request_results.append(result)

            # Calculate statistics
            successful_requests = [r for r in request_results if r["success"] and r["response_time"] is not None]
            failed_requests = len(request_results) - len(successful_requests)
            failure_rate = failed_requests / num_requests if num_requests > 0 else 1.0

            # Start from the all-failed baseline and overlay latency stats
            # only when at least one request succeeded.
            stats = {
                "concurrency_level": concurrency_level,
                "total_requests": num_requests,
                "successful_requests": len(successful_requests),
                "failed_requests": failed_requests,
                "failure_rate": failure_rate,
                "avg_response_time": 0,
                "min_response_time": 0,
                "max_response_time": 0,
                "avg_chars_per_response": 0,
                "total_chars": 0,
                "throughput": 0,
                "within_threshold": False
            }
            if successful_requests:
                response_times = [r["response_time"] for r in successful_requests]
                char_counts = [r["char_count"] for r in successful_requests]
                total_time = sum(response_times)
                stats.update({
                    "avg_response_time": total_time / len(response_times),
                    "min_response_time": min(response_times),
                    "max_response_time": max(response_times),
                    "avg_chars_per_response": sum(char_counts) / len(char_counts),
                    "total_chars": sum(char_counts),
                    # NOTE: successes per summed request-second (inverse of
                    # mean latency), not wall-clock throughput.
                    "throughput": len(successful_requests) / total_time if total_time > 0 else 0,
                    "within_threshold": failure_rate <= self.config['failure_rate_threshold']
                })

            self.logger.info(f"Concurrency {concurrency_level} results: Failure rate {failure_rate:.2%}, Within threshold: {stats['within_threshold']}")
            return stats

    async def run_stress_test(self):
        """Run the incremental stress test over the configured levels.

        Each level must reach the success-rate threshold to advance; the
        first level that misses it is recorded and the run stops there.
        """
        self.logger.info("Starting incremental stress test")
        self.logger.info(f"Failure rate threshold: {self.config['failure_rate_threshold']:.2%}")

        # Use concurrency levels from config
        concurrency_levels = self.config['concurrency_levels']
        self.max_successful_concurrency = 0
        self.first_failed_concurrency = None

        for current_concurrency in concurrency_levels:
            result = await self.test_concurrency_level(current_concurrency)
            self.results.append(result)

            # Save intermediate results so a crash doesn't lose earlier levels
            self.save_results()

            total = result['total_requests']
            success_rate = result['successful_requests'] / total if total > 0 else 0
            failure_rate = result['failed_requests'] / total if total > 0 else 1.0

            self.logger.info(f"Concurrency {current_concurrency}: Success rate {success_rate:.2%}, Failure rate {failure_rate:.2%}")

            if success_rate >= self.config['success_rate_threshold']:
                self.max_successful_concurrency = current_concurrency
                self.logger.info(f"Concurrency {current_concurrency}: Success rate {success_rate:.2%} >= {self.config['success_rate_threshold']:.0%}, proceeding to next level")
            else:
                # First miss ends the test. Both messages below ended in a
                # break in the original, so one exit point suffices.
                if self.first_failed_concurrency is None:
                    self.first_failed_concurrency = current_concurrency
                if failure_rate > self.config['failure_rate_threshold']:
                    self.logger.info(f"Failure rate {failure_rate:.2%} > {self.config['failure_rate_threshold']:.0%}, stopping test")
                else:
                    self.logger.info(f"Success rate {success_rate:.2%} < {self.config['success_rate_threshold']:.0%}, stopping test")
                break

            # Brief pause between tests
            await asyncio.sleep(1)

        self.logger.info(f"Stress test completed. Maximum successful concurrency: {self.max_successful_concurrency}")

    def save_results(self):
        """Persist config plus all per-level results to the run's JSON file."""
        result_dir = Path("stress_results")
        result_dir.mkdir(exist_ok=True)

        # Reuse the run timestamp: each call overwrites the same file rather
        # than creating a new one per concurrency level.
        result_file = result_dir / f"stress_test_results_{self.run_timestamp}.json"

        with open(result_file, 'w', encoding='utf-8') as f:
            json.dump({
                "config": self.config,
                "max_successful_concurrency": self.max_successful_concurrency,
                "first_failed_concurrency": self.first_failed_concurrency,
                "results": self.results
            }, f, indent=2, ensure_ascii=False)

        self.logger.info(f"Results saved to {result_file}")

    def generate_report(self):
        """Write a human-readable text report summarizing the completed run."""
        if not self.results:
            self.logger.warning("No results available for report generation")
            return

        result_dir = Path("stress_results")
        result_dir.mkdir(exist_ok=True)

        report_file = result_dir / f"stress_test_report_{self.run_timestamp}.txt"

        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("AI Model Stress Test Report\n")
            f.write("=" * 50 + "\n\n")
            f.write(f"Test Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"Model: {self.config['model']}\n")
            f.write(f"API Endpoint: {self.config['url']}\n")
            f.write(f"Testing Methodology: Concurrency levels {self.config['concurrency_levels']}\n")
            f.write(f"Success Threshold: {self.config['success_rate_threshold']:.0%} (proceed if success rate >= threshold)\n")
            f.write(f"Failure Threshold: {self.config['failure_rate_threshold']:.0%} (stop if failure rate > threshold)\n\n")

            f.write("SUMMARY\n")
            f.write("=" * 50 + "\n")
            f.write(f"Maximum Successful Concurrency: {self.max_successful_concurrency}\n")
            if self.max_successful_concurrency > 0:
                f.write(f"This is the highest concurrency level where success rate >= {self.config['success_rate_threshold']:.0%}\n")
            else:
                f.write(f"No concurrency level achieved {self.config['success_rate_threshold']:.0%} success rate\n")
            f.write("\n")

            # Add information about first failed concurrency if it exists
            if self.first_failed_concurrency is not None:
                f.write(f"First Failed Concurrency: {self.first_failed_concurrency}\n")
                # Find the result for the first failed concurrency
                failed_result = next((r for r in self.results if r['concurrency_level'] == self.first_failed_concurrency), None)
                if failed_result:
                    failure_rate = failed_result['failed_requests'] / failed_result['total_requests']
                    f.write(f"  Failure Rate at Concurrency {self.first_failed_concurrency}: {failure_rate:.2%}\n")
                f.write("\n")

            f.write("DETAILED RESULTS\n")
            f.write("=" * 50 + "\n")

            for result in self.results:
                f.write(f"\nConcurrency Level: {result['concurrency_level']}\n")
                f.write(f"  Total Requests: {result['total_requests']}\n")
                f.write(f"  Successful: {result['successful_requests']}\n")
                f.write(f"  Failed: {result['failed_requests']}\n")
                f.write(f"  Failure Rate: {result['failure_rate']:.2%}\n")
                f.write(f"  Within Threshold: {'Yes' if result['within_threshold'] else 'No'}\n")
                if result['successful_requests'] > 0:
                    f.write(f"  Avg Response Time: {result['avg_response_time']:.2f}s\n")
                    f.write(f"  Min Response Time: {result['min_response_time']:.2f}s\n")
                    f.write(f"  Max Response Time: {result['max_response_time']:.2f}s\n")
                    f.write(f"  Avg Chars/Response: {result['avg_chars_per_response']:.0f}\n")
                    f.write(f"  Throughput: {result['throughput']:.2f} req/s\n")

            f.write("\nRECOMMENDATION\n")
            f.write("=" * 50 + "\n")
            if self.max_successful_concurrency > 0:
                f.write(f"For production use, consider setting maximum concurrency to {self.max_successful_concurrency}\n")
                f.write(f"to maintain {self.config['success_rate_threshold']:.0%} success rate based on incremental testing\n")
            else:
                f.write(f"No concurrency level achieved {self.config['success_rate_threshold']:.0%} success rate.\n")
                if self.results:
                    last_result = self.results[-1]
                    failure_rate = last_result['failed_requests'] / last_result['total_requests']
                    # Use the configured threshold here; the original
                    # hard-coded 0.1 / ">10%" regardless of config.
                    threshold = self.config['failure_rate_threshold']
                    if failure_rate > threshold:
                        f.write(f"Testing stopped at concurrency {last_result['concurrency_level']} with failure rate {failure_rate:.2%} (>{threshold:.0%} threshold)\n")
                    else:
                        f.write(f"Testing stopped at concurrency {last_result['concurrency_level']} with {last_result['failed_requests']} failed requests\n")
                f.write("Consider investigating API connectivity or system performance.\n")

        self.logger.info(f"Report saved to {report_file}")

async def main():
    """Entry point: build a StressTester, run every level, emit the report.

    Any failure is logged at ERROR level and re-raised so the process exits
    non-zero.
    """
    try:
        runner = StressTester()
        await runner.run_stress_test()
        runner.generate_report()
        runner.logger.info("Stress test completed successfully!")
    except Exception as e:
        logging.error(f"Stress test failed: {str(e)}")
        raise

# Script entry point: run the async main() under a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())