#!/usr/bin/env python3
"""
Integration Test Runner for EchokitBot Vision Interaction System

This script provides a comprehensive test runner that can be integrated with
existing test infrastructure and CI/CD pipelines.

Features:
- Automated test discovery and execution
- Detailed test reporting
- Performance benchmarking
- Compatibility validation
- Error analysis and debugging
"""

import os
import sys
import time
import json
import yaml
import argparse
import subprocess
import tempfile
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, asdict
from datetime import datetime
import unittest
import logging

# Add package to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

# Import test modules
from test_end_to_end_integration import TestEndToEndIntegration, TestSystemCompatibility


@dataclass
class TestResult:
    """Test result data structure"""
    test_name: str
    status: str  # "PASS", "FAIL", "SKIP", "ERROR"
    duration: float
    message: str = ""
    details: Dict[str, Any] = None
    timestamp: str = ""
    
    def __post_init__(self):
        if not self.timestamp:
            self.timestamp = datetime.now().isoformat()
        if self.details is None:
            self.details = {}


@dataclass
class TestSuite:
    """Test suite configuration"""
    name: str
    description: str
    tests: List[str]
    setup_commands: List[str] = None
    teardown_commands: List[str] = None
    timeout: int = 300
    
    def __post_init__(self):
        if self.setup_commands is None:
            self.setup_commands = []
        if self.teardown_commands is None:
            self.teardown_commands = []


class IntegrationTestRunner:
    """Main test runner class"""
    
    def __init__(self, config_file: Optional[str] = None):
        """Initialize test runner"""
        self.config_file = config_file or self._find_config_file()
        self.config = self._load_config()
        self.results: List[TestResult] = []
        self.start_time = time.time()
        
        # Setup logging
        self._setup_logging()
        
        # Create temp directory for test artifacts
        self.temp_dir = tempfile.mkdtemp(prefix="echokitbot_test_")
        
    def _find_config_file(self) -> str:
        """Find test configuration file"""
        possible_paths = [
            "test_integration_config.yaml",
            "test/test_integration_config.yaml",
            "src/echokitbot_microros_voice/test/test_integration_config.yaml"
        ]
        
        for path in possible_paths:
            if os.path.exists(path):
                return path
        
        # Create default config if none found
        return self._create_default_config()
    
    def _create_default_config(self) -> str:
        """Create default test configuration"""
        default_config = {
            'test_config': {
                'environment': {
                    'ros_domain_id': 42,
                    'log_level': 'INFO'
                },
                'performance': {
                    'max_response_time': 5.0,
                    'max_memory_usage': 100.0
                }
            }
        }
        
        config_path = os.path.join(self.temp_dir, "default_config.yaml")
        with open(config_path, 'w') as f:
            yaml.dump(default_config, f)
        
        return config_path
    
    def _load_config(self) -> Dict[str, Any]:
        """Load test configuration"""
        try:
            with open(self.config_file, 'r') as f:
                return yaml.safe_load(f)
        except Exception as e:
            print(f"Warning: Could not load config file {self.config_file}: {e}")
            return {}
    
    def _setup_logging(self):
        """Setup logging configuration"""
        log_level = self.config.get('test_config', {}).get('environment', {}).get('log_level', 'INFO')
        
        logging.basicConfig(
            level=getattr(logging, log_level),
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.StreamHandler(sys.stdout),
                logging.FileHandler(os.path.join(self.temp_dir, 'test.log'))
            ]
        )
        
        self.logger = logging.getLogger(__name__)
    
    def setup_environment(self):
        """Setup test environment"""
        self.logger.info("Setting up test environment...")
        
        # Set ROS domain ID
        ros_domain_id = self.config.get('test_config', {}).get('environment', {}).get('ros_domain_id', 42)
        os.environ['ROS_DOMAIN_ID'] = str(ros_domain_id)
        
        # Set Python path
        package_path = os.path.join(os.path.dirname(__file__), '..')
        if package_path not in sys.path:
            sys.path.insert(0, package_path)
        
        # Set test environment variables
        os.environ['ECHOKITBOT_TEST_MODE'] = '1'
        os.environ['ECHOKITBOT_TEST_CONFIG_DIR'] = self.temp_dir
        
        self.logger.info("Test environment setup complete")
    
    def run_test_suite(self, suite_name: str) -> List[TestResult]:
        """Run a specific test suite"""
        self.logger.info(f"Running test suite: {suite_name}")
        
        suite_results = []
        
        if suite_name == "end_to_end":
            suite_results.extend(self._run_python_tests(TestEndToEndIntegration))
        elif suite_name == "compatibility":
            suite_results.extend(self._run_python_tests(TestSystemCompatibility))
        elif suite_name == "performance":
            suite_results.extend(self._run_performance_tests())
        elif suite_name == "all":
            suite_results.extend(self._run_python_tests(TestEndToEndIntegration))
            suite_results.extend(self._run_python_tests(TestSystemCompatibility))
            suite_results.extend(self._run_performance_tests())
        else:
            self.logger.error(f"Unknown test suite: {suite_name}")
        
        return suite_results
    
    def _run_python_tests(self, test_class) -> List[TestResult]:
        """Run Python unittest test class"""
        results = []
        
        # Create test suite
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromTestCase(test_class)
        
        # Run tests
        for test in suite:
            test_name = f"{test_class.__name__}.{test._testMethodName}"
            start_time = time.time()
            
            try:
                # Setup test environment
                if hasattr(test, 'setUp'):
                    test.setUp()
                
                # Run test
                test.debug()
                
                # Test passed
                duration = time.time() - start_time
                result = TestResult(
                    test_name=test_name,
                    status="PASS",
                    duration=duration,
                    message="Test completed successfully"
                )
                
            except unittest.SkipTest as e:
                duration = time.time() - start_time
                result = TestResult(
                    test_name=test_name,
                    status="SKIP",
                    duration=duration,
                    message=str(e)
                )
                
            except Exception as e:
                duration = time.time() - start_time
                result = TestResult(
                    test_name=test_name,
                    status="FAIL",
                    duration=duration,
                    message=str(e),
                    details={"exception_type": type(e).__name__}
                )
                
            finally:
                # Cleanup
                if hasattr(test, 'tearDown'):
                    try:
                        test.tearDown()
                    except Exception as e:
                        self.logger.warning(f"Teardown failed for {test_name}: {e}")
            
            results.append(result)
            self.logger.info(f"Test {test_name}: {result.status} ({result.duration:.2f}s)")
        
        return results
    
    def _run_performance_tests(self) -> List[TestResult]:
        """Run performance-specific tests"""
        results = []
        
        # Memory usage test
        start_time = time.time()
        try:
            import psutil
            process = psutil.Process()
            initial_memory = process.memory_info().rss / 1024 / 1024  # MB
            
            # Simulate some processing
            import numpy as np
            for i in range(100):
                data = np.random.random((1000, 1000))
                processed = np.mean(data)
                del data
            
            final_memory = process.memory_info().rss / 1024 / 1024  # MB
            memory_increase = final_memory - initial_memory
            
            max_memory = self.config.get('test_config', {}).get('performance', {}).get('max_memory_usage', 100.0)
            
            if memory_increase < max_memory:
                status = "PASS"
                message = f"Memory usage within limits: {memory_increase:.2f}MB"
            else:
                status = "FAIL"
                message = f"Memory usage exceeded limit: {memory_increase:.2f}MB > {max_memory}MB"
            
            duration = time.time() - start_time
            result = TestResult(
                test_name="performance.memory_usage",
                status=status,
                duration=duration,
                message=message,
                details={"initial_memory": initial_memory, "final_memory": final_memory}
            )
            
        except Exception as e:
            duration = time.time() - start_time
            result = TestResult(
                test_name="performance.memory_usage",
                status="ERROR",
                duration=duration,
                message=str(e)
            )
        
        results.append(result)
        
        # Response time test
        start_time = time.time()
        try:
            # Simulate processing delay
            import time
            processing_start = time.time()
            time.sleep(0.1)  # Simulate 100ms processing
            processing_time = time.time() - processing_start
            
            max_response_time = self.config.get('test_config', {}).get('performance', {}).get('max_response_time', 5.0)
            
            if processing_time < max_response_time:
                status = "PASS"
                message = f"Response time within limits: {processing_time:.3f}s"
            else:
                status = "FAIL"
                message = f"Response time exceeded limit: {processing_time:.3f}s > {max_response_time}s"
            
            duration = time.time() - start_time
            result = TestResult(
                test_name="performance.response_time",
                status=status,
                duration=duration,
                message=message,
                details={"processing_time": processing_time}
            )
            
        except Exception as e:
            duration = time.time() - start_time
            result = TestResult(
                test_name="performance.response_time",
                status="ERROR",
                duration=duration,
                message=str(e)
            )
        
        results.append(result)
        
        return results
    
    def generate_report(self, output_file: Optional[str] = None) -> str:
        """Generate test report"""
        if not output_file:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_file = os.path.join(self.temp_dir, f"integration_test_report_{timestamp}.json")
        
        # Calculate summary statistics
        total_tests = len(self.results)
        passed_tests = len([r for r in self.results if r.status == "PASS"])
        failed_tests = len([r for r in self.results if r.status == "FAIL"])
        skipped_tests = len([r for r in self.results if r.status == "SKIP"])
        error_tests = len([r for r in self.results if r.status == "ERROR"])
        
        total_duration = time.time() - self.start_time
        
        # Create report
        report = {
            "summary": {
                "total_tests": total_tests,
                "passed": passed_tests,
                "failed": failed_tests,
                "skipped": skipped_tests,
                "errors": error_tests,
                "success_rate": passed_tests / total_tests if total_tests > 0 else 0,
                "total_duration": total_duration,
                "timestamp": datetime.now().isoformat()
            },
            "configuration": self.config,
            "results": [asdict(result) for result in self.results],
            "environment": {
                "python_version": sys.version,
                "platform": sys.platform,
                "ros_domain_id": os.environ.get('ROS_DOMAIN_ID', 'not_set'),
                "temp_dir": self.temp_dir
            }
        }
        
        # Write report
        with open(output_file, 'w') as f:
            json.dump(report, f, indent=2)
        
        self.logger.info(f"Test report generated: {output_file}")
        return output_file
    
    def print_summary(self):
        """Print test summary to console"""
        total_tests = len(self.results)
        passed_tests = len([r for r in self.results if r.status == "PASS"])
        failed_tests = len([r for r in self.results if r.status == "FAIL"])
        skipped_tests = len([r for r in self.results if r.status == "SKIP"])
        error_tests = len([r for r in self.results if r.status == "ERROR"])
        
        print("\n" + "="*70)
        print("🏁 INTEGRATION TEST SUMMARY")
        print("="*70)
        print(f"Total Tests: {total_tests}")
        print(f"✅ Passed: {passed_tests}")
        print(f"❌ Failed: {failed_tests}")
        print(f"⏭️  Skipped: {skipped_tests}")
        print(f"💥 Errors: {error_tests}")
        
        if total_tests > 0:
            success_rate = passed_tests / total_tests * 100
            print(f"📊 Success Rate: {success_rate:.1f}%")
        
        total_duration = time.time() - self.start_time
        print(f"⏱️  Total Duration: {total_duration:.2f}s")
        
        # Print failed tests
        if failed_tests > 0 or error_tests > 0:
            print("\n❌ FAILED/ERROR TESTS:")
            for result in self.results:
                if result.status in ["FAIL", "ERROR"]:
                    print(f"  - {result.test_name}: {result.message}")
        
        print("="*70)
    
    def cleanup(self):
        """Cleanup test environment"""
        self.logger.info("Cleaning up test environment...")
        
        # Remove temp directory
        import shutil
        try:
            shutil.rmtree(self.temp_dir)
        except Exception as e:
            self.logger.warning(f"Could not remove temp directory {self.temp_dir}: {e}")
        
        self.logger.info("Cleanup complete")
    
    def run(self, test_suites: List[str]) -> bool:
        """Run integration tests"""
        try:
            self.setup_environment()
            
            # Run test suites
            for suite_name in test_suites:
                suite_results = self.run_test_suite(suite_name)
                self.results.extend(suite_results)
            
            # Generate report
            report_file = self.generate_report()
            
            # Print summary
            self.print_summary()
            
            # Return success status
            failed_tests = len([r for r in self.results if r.status in ["FAIL", "ERROR"]])
            return failed_tests == 0
            
        except Exception as e:
            self.logger.error(f"Test runner failed: {e}")
            return False
        
        finally:
            self.cleanup()


def main():
    """Main entry point"""
    parser = argparse.ArgumentParser(description="EchokitBot Vision Integration Test Runner")
    parser.add_argument(
        "--suite", 
        choices=["end_to_end", "compatibility", "performance", "all"],
        default="all",
        help="Test suite to run"
    )
    parser.add_argument(
        "--config",
        help="Path to test configuration file"
    )
    parser.add_argument(
        "--output",
        help="Output file for test report"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Enable verbose output"
    )
    
    args = parser.parse_args()
    
    # Create test runner
    runner = IntegrationTestRunner(config_file=args.config)
    
    # Set log level
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    
    # Run tests
    test_suites = [args.suite] if args.suite != "all" else ["end_to_end", "compatibility", "performance"]
    success = runner.run(test_suites)
    
    # Exit with appropriate code
    sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()