# -*- coding: utf-8 -*-
"""
ProgressiveDataManager 测试配置文件

提供共享的pytest fixtures和测试配置
作者: 测试团队
创建日期: 2024年
"""

import json
import os
import shutil
import sys
import tempfile
import time
from typing import Any, Dict, Generator, List, Optional
from unittest.mock import Mock, patch

import pytest

# 添加项目根目录到Python路径
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))

from service.ProgressiveDataManager import ProgressiveDataManager


# ==================== Test configuration ====================

# Test markers themselves are registered in pytest_configure below.
pytest_plugins = []

def pytest_configure(config):
    """Register the suite's custom pytest markers.

    Registering via ``addinivalue_line`` keeps runs with
    ``--strict-markers`` from rejecting these marks.
    """
    marker_specs = (
        "slow: 标记为慢速测试（运行时间>10秒）",
        "performance: 标记为性能测试",
        "stability: 标记为稳定性测试",
        "integration: 标记为集成测试",
        "regression: 标记为回归测试",
        "comparison: 标记为对比测试",
    )
    for spec in marker_specs:
        config.addinivalue_line("markers", spec)

# Test timeout configuration (seconds)
TEST_TIMEOUT = 300  # 5 minutes
PERFORMANCE_TEST_TIMEOUT = 600  # 10 minutes
STABILITY_TEST_TIMEOUT = 1800  # 30 minutes

# Performance-test thresholds
PERFORMANCE_THRESHOLDS = {
    'max_execution_time_1s': 0.5,  # max execution time for 1s analysis (seconds)
    'max_execution_time_3s': 0.8,  # max execution time for 3s analysis (seconds)
    'max_memory_increase': 0.05,   # max allowed memory-growth ratio (5%)
    'min_performance_improvement': 0.3,  # min required performance improvement (30%)
    'max_cpu_usage': 0.8,         # max CPU usage (80%)
}

# Signal-quality test data: each entry is a sequence of raw signal-quality
# readings (lower values mean better signal).
SIGNAL_QUALITY_PATTERNS = {
    'excellent': [10, 15, 12, 18, 14],
    'good': [25, 30, 20, 35, 28],
    'acceptable': [45, 50, 40, 55, 48],
    'poor': [110, 120, 105, 125, 115],
    'very_poor': [150, 180, 160, 170, 155],
    'critical': [220, 250, 230, 240, 235],
    'mixed_quality': [25, 150, 30, 120, 20],
    'degrading': [25, 50, 75, 125, 200],
    'improving': [200, 150, 100, 50, 25],
    'fluctuating': [25, 150, 30, 120, 20, 200, 15, 180, 10]
}


# ==================== 基础Fixtures ====================

@pytest.fixture(scope="session")
def test_session_id():
    """Session-scoped unique identifier for this test run."""
    stamp = int(time.time())
    return f"test_session_{stamp}"


@pytest.fixture(scope="function")
def temp_dir():
    """Yield a fresh temporary directory; remove it after the test."""
    path = tempfile.mkdtemp(prefix="pdm_test_")
    yield path
    # Teardown: discard the directory and everything inside it.
    if os.path.exists(path):
        shutil.rmtree(path)


@pytest.fixture(scope="function")
def clean_manager():
    """Yield a pristine ProgressiveDataManager with a unique session id."""
    unique_id = f"test_{int(time.time() * 1000000)}"
    instance = ProgressiveDataManager(unique_id)
    yield instance
    # Drop our reference so the instance can be reclaimed.
    del instance


@pytest.fixture(scope="function")
def manager_with_warmup_data(clean_manager):
    """ProgressiveDataManager pre-loaded with 30 good-signal warm-up points."""
    start = time.time()
    # 30 points, 100 ms apart; signal_quality cycles through 25-34.
    for idx in range(30):
        clean_manager.add_data_point({
            'timestamp': start + idx * 0.1,
            'signal_quality': 25 + (idx % 10),
            'eeg_data': [100 + idx] * 8,  # simulated 8-channel EEG samples
            'other_metrics': {'attention': 0.7, 'meditation': 0.6}
        })

    yield clean_manager


@pytest.fixture(scope="function")
def manager_with_poor_signal(clean_manager):
    """ProgressiveDataManager pre-loaded with 10 poor-signal data points."""
    start = time.time()
    # 10 points, 100 ms apart; signal_quality cycles through 150-169 (poor).
    for idx in range(10):
        clean_manager.add_data_point({
            'timestamp': start + idx * 0.1,
            'signal_quality': 150 + (idx % 20),
            'eeg_data': [50 + idx] * 8,
            'other_metrics': {'attention': 0.3, 'meditation': 0.2}
        })

    yield clean_manager


# ==================== 测试数据Fixtures ====================

@pytest.fixture(scope="session")
def signal_patterns():
    """Expose the module-level SIGNAL_QUALITY_PATTERNS table to tests."""
    return SIGNAL_QUALITY_PATTERNS


@pytest.fixture(scope="function")
def test_data_generator():
    """Factory fixture that produces synthetic data sequences.

    Returns a callable ``(pattern_name, count=None) -> List[Dict]`` which
    builds ``count`` data points following the named entry from
    SIGNAL_QUALITY_PATTERNS (cycling through the pattern when ``count``
    exceeds its length). ``count`` defaults to the pattern length.

    Raises:
        ValueError: if ``pattern_name`` is not a known pattern.
    """
    # Fix: the parameter was annotated ``count: int = None``, an implicit
    # Optional that PEP 484 disallows; it is now Optional[int] explicitly.
    def _generate_data_sequence(pattern_name: str, count: Optional[int] = None) -> List[Dict]:
        """Build a list of data points for the given signal pattern."""
        if pattern_name not in SIGNAL_QUALITY_PATTERNS:
            raise ValueError(f"未知的信号模式: {pattern_name}")
        
        pattern = SIGNAL_QUALITY_PATTERNS[pattern_name]
        if count is None:
            count = len(pattern)
        
        base_time = time.time()
        data_sequence = []
        
        for i in range(count):
            signal_quality = pattern[i % len(pattern)]
            data_point = {
                'timestamp': base_time + i * 0.1,  # points spaced 100 ms apart
                'signal_quality': signal_quality,
                'eeg_data': [100 + i + j for j in range(8)],  # 8 pseudo channels
                'other_metrics': {
                    # Attention/meditation degrade as signal quality worsens,
                    # floored at 0.1 so downstream code never sees zero.
                    'attention': max(0.1, 1.0 - signal_quality / 200),
                    'meditation': max(0.1, 1.0 - signal_quality / 250)
                }
            }
            data_sequence.append(data_point)
        
        return data_sequence
    
    return _generate_data_sequence


@pytest.fixture(scope="function")
def performance_baseline():
    """Load stored performance-baseline data, falling back to defaults.

    Any keys missing from the stored file are filled in from the
    defaults; a missing or unreadable file yields the defaults alone.
    """
    baseline_path = os.path.join(os.path.dirname(__file__), 'test_data', 'performance_baseline.json')

    defaults = {
        'execution_time_3s_analysis': 0.5,  # seconds for the 3s analysis
        'memory_usage_baseline': 50 * 1024 * 1024,  # 50 MB baseline memory
        'cpu_usage_baseline': 0.3,  # 30% baseline CPU usage
        'last_updated': time.time()
    }

    if os.path.exists(baseline_path):
        try:
            with open(baseline_path, 'r', encoding='utf-8') as fh:
                stored = json.load(fh)
        except (json.JSONDecodeError, IOError):
            return defaults
        # Merge: keep stored values, add any defaults they lack.
        for key, value in defaults.items():
            stored.setdefault(key, value)
        return stored

    return defaults


# ==================== Mock Fixtures ====================

@pytest.fixture(scope="function")
def mock_time():
    """Patch time.time to a fixed instant (2022-01-01 00:00:00 UTC)."""
    with patch('time.time') as patched:
        patched.return_value = 1640995200.0
        yield patched


@pytest.fixture(scope="function")
def mock_memory_profiler():
    """Patch memory_profiler.memory_usage with canned readings (MB)."""
    with patch('memory_profiler.memory_usage') as patched:
        patched.return_value = [50.0, 52.0, 51.5, 53.0, 52.5]
        yield patched


@pytest.fixture(scope="function")
def mock_system_resources():
    """Patch psutil.Process so resource probes observe fixed values."""
    with patch('psutil.Process') as process_cls:
        fake_process = Mock()
        fake_process.memory_info.return_value.rss = 50 * 1024 * 1024  # 50 MB
        fake_process.cpu_percent.return_value = 25.0  # 25% CPU
        process_cls.return_value = fake_process
        yield fake_process


# ==================== 测试环境Fixtures ====================

@pytest.fixture(scope="session", autouse=True)
def setup_test_environment():
    """Create the test-data and report directories for the session."""
    here = os.path.dirname(__file__)
    os.makedirs(os.path.join(here, 'test_data'), exist_ok=True)

    reports_root = os.path.join(here, 'reports')
    for name in ('coverage', 'performance', 'html'):
        os.makedirs(os.path.join(reports_root, name), exist_ok=True)

    yield

    # No teardown: report directories are intentionally kept so results
    # remain inspectable after the run.


@pytest.fixture(scope="function", autouse=True)
def test_isolation():
    """Force garbage collection around each test to limit cross-test effects."""
    import gc
    gc.collect()  # reclaim leftovers from previous tests before starting
    yield
    gc.collect()  # reclaim this test's garbage before the next one runs


# ==================== 性能测试Fixtures ====================

@pytest.fixture(scope="function")
def performance_monitor():
    """Provide a simple wall-clock / RSS performance monitor.

    Returns a fresh PerformanceMonitor supporting start/stop bookkeeping
    plus a list of named measurements.
    """
    class PerformanceMonitor:
        def __init__(self):
            self.start_time = None     # wall-clock seconds at start_monitoring()
            self.end_time = None       # wall-clock seconds at stop_monitoring()
            self.start_memory = None   # RSS bytes at start_monitoring()
            self.end_memory = None     # RSS bytes at stop_monitoring()
            self.measurements = []
        
        def start_monitoring(self):
            """Record the start timestamp and current process RSS."""
            import psutil
            self.start_time = time.time()
            process = psutil.Process()
            self.start_memory = process.memory_info().rss
        
        def stop_monitoring(self):
            """Record the end timestamp and current process RSS."""
            import psutil
            self.end_time = time.time()
            process = psutil.Process()
            self.end_memory = process.memory_info().rss
        
        def get_execution_time(self):
            """Elapsed seconds between start and stop, or None if unmeasured."""
            # Fix: compare against None explicitly — a 0.0 timestamp is falsy
            # but valid (e.g. when time.time is mocked), and the old
            # truthiness check wrongly returned None for it.
            if self.start_time is not None and self.end_time is not None:
                return self.end_time - self.start_time
            return None
        
        def get_memory_usage(self):
            """RSS change in bytes between start and stop, or None if unmeasured."""
            # Fix: same explicit None check as get_execution_time.
            if self.start_memory is not None and self.end_memory is not None:
                return self.end_memory - self.start_memory
            return None
        
        def add_measurement(self, name: str, value: float):
            """Append a named measurement stamped with the current time."""
            self.measurements.append({
                'name': name,
                'value': value,
                'timestamp': time.time()
            })
        
        def get_summary(self):
            """Return execution time, memory delta, and all measurements."""
            return {
                'execution_time': self.get_execution_time(),
                'memory_usage_change': self.get_memory_usage(),
                'measurements': self.measurements
            }
    
    return PerformanceMonitor()


# ==================== 辅助函数Fixtures ====================

@pytest.fixture(scope="function")
def assert_helpers():
    """Bundle of reusable domain-specific assertion helpers."""
    class AssertHelpers:
        @staticmethod
        def assert_performance_improvement(old_time: float, new_time: float, min_improvement: float = 0.3):
            """Assert new_time improves on old_time by at least min_improvement."""
            ratio = (old_time - new_time) / old_time
            assert ratio >= min_improvement, f"性能提升{ratio:.2%}不足，要求至少{min_improvement:.2%}"
        
        @staticmethod
        def assert_memory_usage_acceptable(memory_change: int, max_increase: int = 10 * 1024 * 1024):
            """Assert memory growth stays within max_increase bytes."""
            assert memory_change <= max_increase, f"内存增长{memory_change / 1024 / 1024:.1f}MB超过限制{max_increase / 1024 / 1024:.1f}MB"
        
        @staticmethod
        def assert_signal_quality_analysis(manager, expected_can_analyze: bool):
            """Assert manager.can_analyze_brain_state() matches the expectation."""
            observed = manager.can_analyze_brain_state()
            assert observed == expected_can_analyze, f"信号质量分析结果不符合预期: 期望{expected_can_analyze}, 实际{observed}"
        
        @staticmethod
        def assert_auto_disconnect_logic(manager, expected_should_disconnect: bool):
            """Assert manager.should_auto_disconnect() matches the expectation."""
            observed = manager.should_auto_disconnect()
            assert observed == expected_should_disconnect, f"自动断开逻辑不符合预期: 期望{expected_should_disconnect}, 实际{observed}"
    
    return AssertHelpers()


@pytest.fixture(scope="function")
def test_reporter():
    """Collect per-test results and produce a JSON summary report."""
    class TestReporter:
        def __init__(self):
            self.test_results = []
        
        def add_result(self, test_name: str, result: Dict[str, Any]):
            """Record one test result stamped with the current time."""
            entry = {
                'test_name': test_name,
                'timestamp': time.time(),
                'result': result
            }
            self.test_results.append(entry)
        
        def generate_summary(self):
            """Aggregate totals, pass counts, and pass rate over results."""
            total = len(self.test_results)
            passed = sum(
                1 for entry in self.test_results
                if entry['result'].get('status') == 'passed'
            )
            return {
                'total_tests': total,
                'passed_tests': passed,
                'failed_tests': total - passed,
                'pass_rate': passed / total if total > 0 else 0,
                'results': self.test_results
            }
        
        def save_report(self, filepath: str):
            """Write the summary to filepath as pretty-printed UTF-8 JSON."""
            with open(filepath, 'w', encoding='utf-8') as out:
                json.dump(self.generate_summary(), out, indent=2, ensure_ascii=False)
    
    return TestReporter()


# ==================== 参数化测试数据 ====================

# Signal-quality test parameters
SIGNAL_QUALITY_TEST_PARAMS = [
    ('excellent', True, False),   # (pattern name, can analyze, should disconnect)
    ('good', True, False),
    ('acceptable', True, False),
    ('poor', False, False),
    ('very_poor', False, False),
    ('critical', False, True),
]

# Performance test parameters
PERFORMANCE_TEST_PARAMS = [
    (10, 'small_dataset'),     # (number of data points, description)
    (100, 'medium_dataset'),
    (1000, 'large_dataset'),
    (5000, 'very_large_dataset'),
]

# Time-window test parameters
TIME_WINDOW_TEST_PARAMS = [
    (1, 'one_second_window'),
    (3, 'three_second_window'),
    (5, 'five_second_window'),
]


# ==================== 测试标记装饰器 ====================

def slow_test(func):
    """Apply the ``slow`` pytest marker to *func*."""
    mark = pytest.mark.slow
    return mark(func)


def performance_test(func):
    """Apply the ``performance`` pytest marker to *func*."""
    mark = pytest.mark.performance
    return mark(func)


def stability_test(func):
    """Apply the ``stability`` pytest marker to *func*."""
    mark = pytest.mark.stability
    return mark(func)


def integration_test(func):
    """Apply the ``integration`` pytest marker to *func*."""
    mark = pytest.mark.integration
    return mark(func)


def regression_test(func):
    """Apply the ``regression`` pytest marker to *func*."""
    mark = pytest.mark.regression
    return mark(func)


def comparison_test(func):
    """Apply the ``comparison`` pytest marker to *func*."""
    mark = pytest.mark.comparison
    return mark(func)


# ==================== 测试钩子函数 ====================

def pytest_runtest_setup(item):
    """Pre-test hook: pick a per-test timeout based on its markers."""
    if 'slow' in item.keywords:
        limit = STABILITY_TEST_TIMEOUT
    elif 'performance' in item.keywords:
        limit = PERFORMANCE_TEST_TIMEOUT
    else:
        limit = TEST_TIMEOUT
    item.config.option.timeout = limit


def pytest_runtest_teardown(item, nextitem):
    """Post-test hook: force a garbage-collection pass so memory held by
    the finished test is reclaimed before the next one starts."""
    from gc import collect
    collect()


def pytest_collection_modifyitems(config, items):
    """Collection hook: in fast mode (--fast), mark slow tests as skipped."""
    if not config.getoption("--fast", default=False):
        return
    skip_marker = pytest.mark.skip(reason="跳过慢速测试（快速模式）")
    for collected in items:
        if "slow" in collected.keywords:
            collected.add_marker(skip_marker)


def pytest_addoption(parser):
    """Register the suite's custom command-line options (all boolean flags)."""
    option_specs = [
        ("--fast", "快速模式：跳过慢速测试"),
        ("--performance-only", "仅运行性能测试"),
        ("--baseline-update", "更新性能基线数据"),
    ]
    for flag, help_text in option_specs:
        parser.addoption(
            flag,
            action="store_true",
            default=False,
            help=help_text
        )


# ==================== 测试数据清理 ====================

@pytest.fixture(scope="session", autouse=True)
def cleanup_test_artifacts():
    """After the session, best-effort delete temp artifacts left behind."""
    yield
    
    import glob
    # NOTE(review): patterns are resolved relative to the current working
    # directory, matching the original behavior.
    for pattern in ('temp_test_*', '*.tmp', 'test_session_*'):
        for path in glob.glob(pattern):
            try:
                if os.path.isfile(path):
                    os.remove(path)
                elif os.path.isdir(path):
                    shutil.rmtree(path)
            except (OSError, IOError):
                pass  # best-effort cleanup; ignore failures


# ==================== Version info ====================

__version__ = "1.0.0"
__author__ = "测试团队"
__created__ = "2024年"
__description__ = "ProgressiveDataManager数据分析优化测试配置文件"