# -*- coding: utf-8 -*-
"""
Python 企业级并发集合和异步编程最佳实践示例

本模块演示了Python并发集合和异步编程的企业级最佳实践，包括:
- 线程安全的集合操作
- 异步编程模式
- 并发数据处理
- 资源管理和上下文管理器
- 性能优化和监控
- 错误处理和恢复
- 分布式任务处理
- 内存管理和缓存策略
"""

import asyncio
import concurrent.futures
import functools
import logging
import queue
import threading
import time
import weakref
from collections import Counter, defaultdict, deque, namedtuple
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
from contextlib import asynccontextmanager, contextmanager
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import (
    Any, AsyncGenerator, AsyncIterator, Callable, Dict, Generator, 
    Iterable, List, Optional, Set, Tuple, TypeVar, Union, Coroutine
)
import sys
import multiprocessing
from threading import Lock, RLock, Semaphore, Event, Condition

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s'
)
logger = logging.getLogger(__name__)

# 类型变量
T = TypeVar('T')
U = TypeVar('U')

class TaskStatus(Enum):
    """Lifecycle states a submitted task can be in."""

    PENDING = 1    # queued, not started yet
    RUNNING = 2    # currently executing
    COMPLETED = 3  # finished successfully
    FAILED = 4     # raised an exception
    CANCELLED = 5  # cancelled before completion

class ConcurrencyType(Enum):
    """Available concurrency execution models."""

    THREAD = 1   # thread pool
    PROCESS = 2  # process pool
    ASYNC = 3    # asyncio event loop
    HYBRID = 4   # combination of the above

@dataclass
class TaskResult:
    """Outcome of one task execution (success, failure, or cancellation)."""
    task_id: str  # unique identifier assigned at submission time
    status: TaskStatus  # final lifecycle state of the task
    result: Any = None  # return value when status is COMPLETED
    error: Optional[Exception] = None  # captured exception when status is FAILED
    execution_time: float = 0.0  # wall-clock seconds the task spent running
    worker_id: Optional[str] = None  # id of the worker that ran the task, if tracked

@dataclass
class PerformanceMetrics:
    """Aggregated performance counters for one named operation."""
    operation_name: str  # label of the operation being measured
    total_tasks: int = 0  # items submitted across all runs
    completed_tasks: int = 0  # items that finished successfully
    failed_tasks: int = 0  # items that raised
    total_time: float = 0.0  # cumulative wall-clock seconds
    avg_task_time: float = 0.0  # total_time / completed_tasks
    throughput: float = 0.0  # completed tasks per second
    memory_usage: int = 0  # reserved for memory tracking (not populated in this file)
    cpu_usage: float = 0.0  # reserved for CPU tracking (not populated in this file)

class ThreadSafeCounter:
    """An integer counter whose operations are atomic across threads.

    Every method performs its read-modify-write under one mutex, so
    concurrent increments/decrements never lose updates.
    """

    def __init__(self, initial_value: int = 0):
        self._lock = Lock()
        self._value = initial_value

    def increment(self, amount: int = 1) -> int:
        """Atomically add *amount*; return the new value."""
        return self._add(amount)

    def decrement(self, amount: int = 1) -> int:
        """Atomically subtract *amount*; return the new value."""
        return self._add(-amount)

    def _add(self, delta: int) -> int:
        # Single locked read-modify-write shared by increment/decrement.
        with self._lock:
            self._value += delta
            return self._value

    def get(self) -> int:
        """Return the current value."""
        with self._lock:
            return self._value

    def set(self, value: int) -> None:
        """Replace the current value."""
        with self._lock:
            self._value = value

    def compare_and_swap(self, expected: int, new_value: int) -> bool:
        """Atomically set the value to *new_value* iff it equals *expected*.

        Returns True when the swap took place, False otherwise.
        """
        with self._lock:
            matched = self._value == expected
            if matched:
                self._value = new_value
            return matched

class ThreadSafeCache:
    """Thread-safe LRU cache with O(1) operations.

    Recency is tracked by the insertion order of the backing dict
    (guaranteed since Python 3.7): the first key is always the least
    recently used.  This replaces the previous deque-based bookkeeping,
    whose ``deque.remove(key)`` made every hit and eviction O(n).
    All public methods are guarded by a reentrant lock.
    """

    def __init__(self, max_size: int = 1000):
        """
        Args:
            max_size: maximum number of entries retained before the
                least-recently-used entry is evicted.
        """
        # The dict doubles as storage and recency order (oldest key first).
        self._cache: Dict[Any, Any] = {}
        self._max_size = max_size
        self._lock = RLock()
        # Hit/miss tallies; plain ints are safe because every access
        # already happens under self._lock.
        self._hits = 0
        self._misses = 0

    def get(self, key: Any, default: Any = None) -> Any:
        """Return the cached value for *key*, or *default* on a miss.

        A hit refreshes the key's recency.
        """
        with self._lock:
            if key in self._cache:
                # O(1) move-to-most-recent: pop and re-insert.
                value = self._cache.pop(key)
                self._cache[key] = value
                self._hits += 1
                return value
            self._misses += 1
            return default

    def put(self, key: Any, value: Any) -> None:
        """Insert or update *key*, evicting the LRU entry when full."""
        with self._lock:
            if key in self._cache:
                # Existing key: delete so re-insertion refreshes recency.
                del self._cache[key]
            elif len(self._cache) >= self._max_size:
                # Evict the least-recently-used entry (first dict key).
                oldest_key = next(iter(self._cache))
                del self._cache[oldest_key]

            self._cache[key] = value

    def remove(self, key: Any) -> bool:
        """Remove *key* from the cache; return True if it was present."""
        with self._lock:
            if key in self._cache:
                del self._cache[key]
                return True
            return False

    def clear(self) -> None:
        """Drop every cached entry (hit/miss statistics are kept)."""
        with self._lock:
            self._cache.clear()

    def stats(self) -> Dict[str, Any]:
        """Return size, capacity, hit/miss counts and hit rate."""
        with self._lock:
            total_requests = self._hits + self._misses
            hit_rate = self._hits / total_requests if total_requests > 0 else 0

            return {
                'size': len(self._cache),
                'max_size': self._max_size,
                'hits': self._hits,
                'misses': self._misses,
                'hit_rate': hit_rate,
                'total_requests': total_requests
            }

class AsyncTaskManager:
    """Asynchronous task manager.

    Wraps submitted coroutines in asyncio Tasks, bounds how many run at
    once with a semaphore, and records a TaskResult per task (completed,
    failed, or cancelled) for later inspection.
    """
    
    def __init__(self, max_concurrent_tasks: int = 10):
        # Caps the number of wrapped coroutines executing simultaneously.
        self._semaphore = asyncio.Semaphore(max_concurrent_tasks)
        self._tasks: Dict[str, asyncio.Task] = {}
        self._results: Dict[str, TaskResult] = {}
        # Guards _tasks/_results between concurrently-running coroutines.
        self._lock = asyncio.Lock()
        # Thread-safe counter used only to mint unique auto-generated ids.
        self._task_counter = ThreadSafeCounter()
    
    async def submit_task(self, 
                         coro: Coroutine, 
                         task_id: Optional[str] = None) -> str:
        """Submit a coroutine for background execution.

        Args:
            coro: the coroutine to run.
            task_id: optional explicit id; auto-generated when omitted.

        Returns:
            The id under which the task is tracked.

        Raises:
            ValueError: if a task with the same id already exists.
        """
        if task_id is None:
            task_id = f"task_{self._task_counter.increment()}"
        
        async with self._lock:
            if task_id in self._tasks:
                raise ValueError(f"任务 {task_id} 已存在")
            
            # Wrap the coroutine so it runs under the semaphore and records
            # its own TaskResult on completion/failure.
            wrapped_coro = self._execute_with_semaphore(coro, task_id)
            task = asyncio.create_task(wrapped_coro)
            self._tasks[task_id] = task
            
            return task_id
    
    async def _execute_with_semaphore(self, coro: Coroutine, task_id: str) -> Any:
        """Run *coro* under the concurrency semaphore, timing it and
        storing a COMPLETED or FAILED TaskResult in self._results."""
        async with self._semaphore:
            start_time = time.perf_counter()
            try:
                result = await coro
                execution_time = time.perf_counter() - start_time
                
                task_result = TaskResult(
                    task_id=task_id,
                    status=TaskStatus.COMPLETED,
                    result=result,
                    execution_time=execution_time
                )
                
                async with self._lock:
                    self._results[task_id] = task_result
                
                return result
                
            except Exception as e:
                # Record the failure, then re-raise so direct awaiters
                # still observe the exception.
                execution_time = time.perf_counter() - start_time
                
                task_result = TaskResult(
                    task_id=task_id,
                    status=TaskStatus.FAILED,
                    error=e,
                    execution_time=execution_time
                )
                
                async with self._lock:
                    self._results[task_id] = task_result
                
                raise
    
    async def get_result(self, task_id: str, timeout: Optional[float] = None) -> TaskResult:
        """Wait for one task and return its TaskResult.

        NOTE(review): if the task raised, `asyncio.wait_for(task, ...)`
        re-raises that exception here instead of returning the recorded
        FAILED TaskResult — confirm this propagation is intended.

        Raises:
            ValueError: if the task id is unknown.
            asyncio.TimeoutError: if the task does not finish in time
                (the task is cancelled in that case).
        """
        async with self._lock:
            if task_id not in self._tasks:
                raise ValueError(f"任务 {task_id} 不存在")
            
            task = self._tasks[task_id]
        
        try:
            await asyncio.wait_for(task, timeout=timeout)
        except asyncio.TimeoutError:
            task.cancel()
            raise
        
        async with self._lock:
            # Fallback covers the unexpected case of a task finishing
            # without ever recording a result.
            return self._results.get(task_id, TaskResult(
                task_id=task_id,
                status=TaskStatus.FAILED,
                error=Exception("任务结果未找到")
            ))
    
    async def wait_all(self, timeout: Optional[float] = None) -> List[TaskResult]:
        """Wait for every submitted task and return all recorded results.

        Per-task exceptions are absorbed by gather(return_exceptions=True);
        on timeout, unfinished tasks are cancelled and TimeoutError re-raised.
        """
        async with self._lock:
            tasks = list(self._tasks.values())
        
        if not tasks:
            return []
        
        try:
            await asyncio.wait_for(asyncio.gather(*tasks, return_exceptions=True), timeout=timeout)
        except asyncio.TimeoutError:
            # Cancel whatever has not completed yet.
            for task in tasks:
                if not task.done():
                    task.cancel()
            raise
        
        async with self._lock:
            return list(self._results.values())
    
    async def cancel_task(self, task_id: str) -> bool:
        """Cancel a still-running task.

        Returns True when a cancellation was requested (and a CANCELLED
        result recorded); False for unknown or already-finished tasks.
        """
        async with self._lock:
            if task_id in self._tasks:
                task = self._tasks[task_id]
                if not task.done():
                    task.cancel()
                    self._results[task_id] = TaskResult(
                        task_id=task_id,
                        status=TaskStatus.CANCELLED
                    )
                    return True
            return False
    
    async def get_stats(self) -> Dict[str, Any]:
        """Return aggregate counts, success rate, average execution time,
        and the number of still-active tasks."""
        async with self._lock:
            total_tasks = len(self._results)
            completed = sum(1 for r in self._results.values() if r.status == TaskStatus.COMPLETED)
            failed = sum(1 for r in self._results.values() if r.status == TaskStatus.FAILED)
            cancelled = sum(1 for r in self._results.values() if r.status == TaskStatus.CANCELLED)
            
            avg_time = 0.0
            if completed > 0:
                total_time = sum(r.execution_time for r in self._results.values() 
                               if r.status == TaskStatus.COMPLETED)
                avg_time = total_time / completed
            
            return {
                'total_tasks': total_tasks,
                'completed': completed,
                'failed': failed,
                'cancelled': cancelled,
                'success_rate': completed / total_tasks if total_tasks > 0 else 0,
                'avg_execution_time': avg_time,
                'active_tasks': len([t for t in self._tasks.values() if not t.done()])
            }

class ConcurrentDataProcessor:
    """Batch data processor backed by a thread or process pool.

    ``process_batch`` returns results in the same order as the input
    (fix: the previous implementation collected batches in completion
    order, scrambling results).  A shared thread-safe LRU cache memoises
    per-item results for hashable items in thread mode only: the cache
    lives in this process and cannot be shared with worker processes.
    """

    def __init__(self,
                 max_workers: Optional[int] = None,
                 use_processes: bool = False):
        """
        Args:
            max_workers: pool size; defaults to min(32, cpu_count + 4).
            use_processes: run batches in a ProcessPoolExecutor instead of
                threads.  NOTE: in process mode the *processor* callable
                must be picklable (i.e. a module-level function).
        """
        self.max_workers = max_workers or min(32, (multiprocessing.cpu_count() or 1) + 4)
        self.use_processes = use_processes
        self._cache = ThreadSafeCache()
        # Per-operation metrics, created lazily on first use.
        self._metrics = defaultdict(lambda: PerformanceMetrics(""))

    @contextmanager
    def get_executor(self):
        """Yield a fresh executor; guarantee shutdown(wait=True) afterwards."""
        executor_cls = ProcessPoolExecutor if self.use_processes else ThreadPoolExecutor
        executor = executor_cls(max_workers=self.max_workers)
        try:
            yield executor
        finally:
            executor.shutdown(wait=True)

    def process_batch(self,
                     data: List[T],
                     processor: Callable[[T], U],
                     batch_size: int = 100,
                     use_cache: bool = True) -> List[U]:
        """Process *data* concurrently in batches.

        Args:
            data: items to process.
            processor: function applied to each item.
            batch_size: number of items per worker task.
            use_cache: memoise results for hashable items (thread mode only).

        Returns:
            Per-item results, aligned with the input order.

        Raises:
            Exception: re-raises the first failure from any batch.
        """
        operation_name = f"batch_process_{processor.__name__}"
        start_time = time.perf_counter()

        try:
            batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]
            # Slots are filled in completion order but concatenated in
            # input order, so the caller sees results aligned with `data`.
            ordered_chunks: List[Optional[List[U]]] = [None] * len(batches)

            with self.get_executor() as executor:
                if self.use_processes:
                    # Fix: submitting the bound method would pickle `self`,
                    # whose cache holds an RLock and cannot be pickled.
                    # Use a picklable staticmethod and skip the (useless
                    # cross-process) cache instead.
                    future_to_index = {
                        executor.submit(type(self)._process_items, batch, processor): idx
                        for idx, batch in enumerate(batches)
                    }
                else:
                    future_to_index = {
                        executor.submit(self._process_single_batch, batch, processor, use_cache): idx
                        for idx, batch in enumerate(batches)
                    }

                # Collect results as batches finish, keyed by batch index.
                for future in as_completed(future_to_index):
                    idx = future_to_index[future]
                    try:
                        ordered_chunks[idx] = future.result()
                    except Exception as e:
                        logger.error(f"批次处理失败: {e}")
                        raise

            results: List[U] = []
            for chunk in ordered_chunks:
                results.extend(chunk or [])

            # Update aggregated performance metrics for this operation.
            execution_time = time.perf_counter() - start_time
            metrics = self._metrics[operation_name]
            metrics.operation_name = operation_name
            metrics.total_tasks += len(data)
            metrics.completed_tasks += len(results)
            metrics.total_time += execution_time
            metrics.avg_task_time = metrics.total_time / metrics.completed_tasks if metrics.completed_tasks > 0 else 0
            metrics.throughput = metrics.completed_tasks / metrics.total_time if metrics.total_time > 0 else 0

            return results

        except Exception as e:
            logger.error(f"批量处理失败: {e}")
            raise

    @staticmethod
    def _process_items(batch: List[T], processor: Callable[[T], U]) -> List[U]:
        """Cache-free batch worker; picklable, so usable in process pools."""
        return [processor(item) for item in batch]

    def _process_single_batch(self,
                             batch: List[T],
                             processor: Callable[[T], U],
                             use_cache: bool) -> List[U]:
        """Process one batch sequentially, consulting the shared cache."""
        results = []

        for item in batch:
            try:
                cache_key = None
                # Fix: every object has a `__hash__` attribute (it is None
                # for unhashable types), so hasattr() always succeeded;
                # check the attribute's value instead.
                if use_cache and getattr(item, '__hash__', None) is not None:
                    try:
                        cache_key = (processor.__name__, hash(item))
                        cached_result = self._cache.get(cache_key)
                        if cached_result is not None:
                            results.append(cached_result)
                            continue
                    except TypeError:
                        # Item is not hashable after all; skip caching.
                        pass

                result = processor(item)
                results.append(result)

                # Store for future lookups of the same item.
                if cache_key is not None:
                    self._cache.put(cache_key, result)

            except Exception as e:
                logger.error(f"处理项目失败: {item}, 错误: {e}")
                raise

        return results

    async def async_process_batch(self,
                                 data: List[T],
                                 async_processor: Callable[[T], Coroutine[Any, Any, U]],
                                 max_concurrent: int = 10) -> List[U]:
        """Apply *async_processor* to every item concurrently.

        A semaphore bounds in-flight coroutines at *max_concurrent*;
        gather() preserves input order in the returned list.
        """
        semaphore = asyncio.Semaphore(max_concurrent)

        async def process_item(item: T) -> U:
            async with semaphore:
                return await async_processor(item)

        tasks = [process_item(item) for item in data]
        return await asyncio.gather(*tasks)

    def get_metrics(self) -> Dict[str, PerformanceMetrics]:
        """Return a snapshot copy of the per-operation metrics."""
        return dict(self._metrics)

    def get_cache_stats(self) -> Dict[str, Any]:
        """Return hit/miss statistics from the shared cache."""
        return self._cache.stats()

@asynccontextmanager
async def async_resource_manager(resource_factory: Callable[[], Any],
                               cleanup_func: Optional[Callable[[Any], Coroutine]] = None):
    """Async context manager that builds a resource and guarantees cleanup.

    Args:
        resource_factory: zero-argument callable producing the resource.
        cleanup_func: optional coroutine function used to release the
            resource.  When omitted, the resource's own ``close`` method
            (sync or async) is used if it exists.
    """
    resource = None
    try:
        resource = resource_factory()
        logger.info(f"资源已创建: {type(resource).__name__}")
        yield resource
    except Exception as e:
        logger.error(f"资源使用过程中出错: {e}")
        raise
    finally:
        if resource is not None:
            try:
                if cleanup_func:
                    await cleanup_func(resource)
                else:
                    close = getattr(resource, 'close', None)
                    if close is not None:
                        # Works for both sync and async close(): call it,
                        # then await only if a coroutine came back.
                        outcome = close()
                        if asyncio.iscoroutine(outcome):
                            await outcome
                logger.info(f"资源已清理: {type(resource).__name__}")
            except Exception as e:
                logger.error(f"资源清理失败: {e}")

class WorkerPool:
    """Fixed-size pool of daemon worker threads fed by a shared task queue.

    Tasks are (task_id, func, args, kwargs) tuples; each worker posts a
    TaskResult to the result queue.  Shutdown combines an Event flag with
    one poison pill (None) per worker to wake blocked queue reads.
    """
    
    def __init__(self, num_workers: int = 4):
        self.num_workers = num_workers
        self.task_queue = queue.Queue()
        self.result_queue = queue.Queue()
        self.workers = []
        self.shutdown_event = Event()
        # Workers start running as soon as the pool is constructed.
        self._start_workers()
    
    def _start_workers(self):
        """Spawn the daemon worker threads."""
        for i in range(self.num_workers):
            worker = threading.Thread(
                target=self._worker_loop,
                args=(f"worker_{i}",),
                daemon=True  # daemon: do not block interpreter exit
            )
            worker.start()
            self.workers.append(worker)
    
    def _worker_loop(self, worker_id: str):
        """Main loop of one worker: pull a task, run it, report the result."""
        logger.info(f"工作线程 {worker_id} 已启动")
        
        while not self.shutdown_event.is_set():
            try:
                # Poll with a timeout so the shutdown event is re-checked
                # at least once per second.
                task_data = self.task_queue.get(timeout=1.0)
                if task_data is None:  # poison pill: stop this worker
                    # NOTE(review): task_done() is not called for the pill,
                    # so task_queue.join() would hang; confirm join() is
                    # never used with this pool.
                    break
                
                task_id, func, args, kwargs = task_data
                
                try:
                    start_time = time.perf_counter()
                    result = func(*args, **kwargs)
                    execution_time = time.perf_counter() - start_time
                    
                    task_result = TaskResult(
                        task_id=task_id,
                        status=TaskStatus.COMPLETED,
                        result=result,
                        execution_time=execution_time,
                        worker_id=worker_id
                    )
                    
                except Exception as e:
                    # func raised: record a FAILED result with the elapsed time.
                    execution_time = time.perf_counter() - start_time
                    task_result = TaskResult(
                        task_id=task_id,
                        status=TaskStatus.FAILED,
                        error=e,
                        execution_time=execution_time,
                        worker_id=worker_id
                    )
                    logger.error(f"任务 {task_id} 在工作线程 {worker_id} 中失败: {e}")
                
                self.result_queue.put(task_result)
                self.task_queue.task_done()
                
            except queue.Empty:
                # get() timed out with no work; loop and re-check shutdown.
                continue
            except Exception as e:
                logger.error(f"工作线程 {worker_id} 出错: {e}")
        
        logger.info(f"工作线程 {worker_id} 已停止")
    
    def submit(self, func: Callable, *args, task_id: Optional[str] = None, **kwargs) -> str:
        """Queue func(*args, **kwargs) for execution; return the task id.

        Auto-generated ids use a microsecond timestamp — presumably unique
        enough here, but two submissions in the same microsecond would
        collide; supply explicit ids if that matters.
        """
        if task_id is None:
            task_id = f"task_{int(time.time() * 1000000)}"
        
        self.task_queue.put((task_id, func, args, kwargs))
        return task_id
    
    def get_result(self, timeout: Optional[float] = None) -> TaskResult:
        """Block until a TaskResult is available.

        Raises:
            queue.Empty: if no result arrives within *timeout*.
        """
        return self.result_queue.get(timeout=timeout)
    
    def shutdown(self, wait: bool = True):
        """Stop all workers.

        Sets the shutdown event, enqueues one poison pill per worker so
        blocked get() calls wake up, and optionally joins the threads.
        """
        logger.info("正在关闭工作线程池...")
        
        # Flag the loops to stop at their next check.
        self.shutdown_event.set()
        
        # One pill per worker unblocks any pending queue.get().
        for _ in range(self.num_workers):
            self.task_queue.put(None)
        
        if wait:
            # Block until every worker thread has exited.
            for worker in self.workers:
                worker.join()
        
        logger.info("工作线程池已关闭")

# 演示函数
def demonstrate_thread_safe_collections():
    """Demonstrate the thread-safe counter and cache under concurrent load."""
    print("\n======== 1. 线程安全集合 ========")
    
    counter = ThreadSafeCounter()
    
    def bump_many():
        # 1000 atomic increments per worker thread.
        for _ in range(1000):
            counter.increment()
    
    # Hammer the counter from five threads at once.
    workers = [threading.Thread(target=bump_many) for _ in range(5)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    
    print(f"✓ 线程安全计数器最终值: {counter.get()} (期望: 5000)")
    
    cache = ThreadSafeCache(max_size=100)
    
    def exercise_cache():
        # Interleave writes with reads of keys written earlier in the loop.
        for i in range(50):
            cache.put(f"key_{i}", f"value_{i}")
            cache.get(f"key_{i % 25}")
    
    # Exercise the cache from three threads concurrently.
    workers = [threading.Thread(target=exercise_cache) for _ in range(3)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    
    stats = cache.stats()
    print(f"✓ 缓存统计: 大小={stats['size']}, 命中率={stats['hit_rate']:.2%}")

async def demonstrate_async_task_management():
    """Demonstrate AsyncTaskManager with a mix of succeeding and failing tasks."""
    print("\n======== 2. 异步任务管理 ========")
    
    manager = AsyncTaskManager(max_concurrent_tasks=5)
    
    async def square_after_delay(x: int, delay: float = 0.1) -> int:
        # Succeeding task: returns x squared after a short sleep.
        await asyncio.sleep(delay)
        return x ** 2
    
    async def flaky_double(x: int) -> int:
        # Failing task: raises for every multiple of three.
        await asyncio.sleep(0.05)
        if x % 3 == 0:
            raise ValueError(f"任务 {x} 故意失败")
        return x * 2
    
    # Submit seven succeeding and three potentially-failing tasks.
    task_ids = []
    for i in range(10):
        coro = square_after_delay(i, 0.1) if i < 7 else flaky_double(i)
        task_ids.append(await manager.submit_task(coro))
    
    print(f"✓ 已提交 {len(task_ids)} 个任务")
    
    try:
        results = await manager.wait_all(timeout=5.0)
        
        ok = [r for r in results if r.status == TaskStatus.COMPLETED]
        bad = [r for r in results if r.status == TaskStatus.FAILED]
        
        print(f"✓ 成功任务: {len(ok)}, 失败任务: {len(bad)}")
        
        if ok:
            avg_time = sum(r.execution_time for r in ok) / len(ok)
            print(f"✓ 平均执行时间: {avg_time:.4f}秒")
        
        stats = await manager.get_stats()
        print(f"✓ 任务统计: {stats}")
        
    except asyncio.TimeoutError:
        print("✗ 任务执行超时")

def demonstrate_concurrent_data_processing():
    """Demonstrate batch processing on thread and process pools.

    The process-pool section is expected to fail: ``cpu_intensive_task``
    is defined inside this function, and locally-defined functions cannot
    be pickled, which ProcessPoolExecutor requires for submitted work.
    Previously that exception aborted the whole demo; it is now caught and
    reported so the remaining output (metrics, cache stats) still runs.
    """
    print("\n======== 3. 并发数据处理 ========")
    
    test_data = list(range(1000))
    
    def cpu_intensive_task(x: int) -> int:
        """CPU-bound work: sum of squares up to (x mod 100)."""
        result = 0
        for i in range(x % 100 + 1):
            result += i ** 2
        return result
    
    def io_intensive_task(x: int) -> int:
        """Simulated IO-bound work (1 ms sleep)."""
        time.sleep(0.001)
        return x * 2
    
    # Thread-pool run: local functions are fine here (no pickling).
    processor = ConcurrentDataProcessor(max_workers=4, use_processes=False)
    
    start_time = time.perf_counter()
    thread_results = processor.process_batch(
        test_data[:100], 
        io_intensive_task, 
        batch_size=20
    )
    thread_time = time.perf_counter() - start_time
    
    print(f"✓ 线程池处理 100 个IO任务: {thread_time:.4f}秒")
    
    # Process-pool run: guarded, because the nested processor function is
    # not picklable and the submission raises inside the pool.
    processor_proc = ConcurrentDataProcessor(max_workers=2, use_processes=True)
    
    try:
        start_time = time.perf_counter()
        process_results = processor_proc.process_batch(
            test_data[:100], 
            cpu_intensive_task, 
            batch_size=25
        )
        process_time = time.perf_counter() - start_time
        print(f"✓ 进程池处理 100 个CPU任务: {process_time:.4f}秒")
    except Exception as e:
        logger.warning(f"进程池演示失败（任务函数不可序列化）: {e}")
        print(f"✗ 进程池演示跳过: {e}")
    
    # Report metrics from both processors (the process one may be empty).
    thread_metrics = processor.get_metrics()
    process_metrics = processor_proc.get_metrics()
    
    for name, metrics in thread_metrics.items():
        print(f"✓ 线程池指标 - {name}: 吞吐量={metrics.throughput:.1f} 任务/秒")
    
    for name, metrics in process_metrics.items():
        print(f"✓ 进程池指标 - {name}: 吞吐量={metrics.throughput:.1f} 任务/秒")
    
    cache_stats = processor.get_cache_stats()
    print(f"✓ 缓存统计: {cache_stats}")

async def demonstrate_async_data_processing():
    """Demonstrate concurrency-limited asynchronous batch processing."""
    print("\n======== 4. 异步数据处理 ========")
    
    processor = ConcurrentDataProcessor()
    
    async def async_api_call(x: int) -> Dict[str, Any]:
        """Simulated asynchronous API call with a small network delay."""
        await asyncio.sleep(0.01)
        return {'id': x, 'result': x * x, 'timestamp': time.time()}
    
    test_data = list(range(50))
    
    started = time.perf_counter()
    async_results = await processor.async_process_batch(
        test_data,
        async_api_call,
        max_concurrent=10
    )
    async_time = time.perf_counter() - started
    
    print(f"✓ 异步处理 {len(test_data)} 个API调用: {async_time:.4f}秒")
    print(f"✓ 平均每个调用: {async_time / len(test_data) * 1000:.2f}毫秒")
    print(f"✓ 结果示例: {async_results[:3]}")

def demonstrate_worker_pool():
    """Demonstrate the custom WorkerPool with Fibonacci jobs."""
    print("\n======== 5. 工作线程池 ========")
    
    pool = WorkerPool(num_workers=3)
    
    def compute_fibonacci(n: int) -> int:
        """Iterative Fibonacci."""
        if n <= 1:
            return n
        prev, curr = 0, 1
        for _ in range(2, n + 1):
            prev, curr = curr, prev + curr
        return curr
    
    # Queue fib(10) .. fib(19).
    task_ids = [pool.submit(compute_fibonacci, i) for i in range(10, 20)]
    
    print(f"✓ 已提交 {len(task_ids)} 个斐波那契计算任务")
    
    # Drain the result queue, one result per submitted task.
    results = []
    for _ in range(len(task_ids)):
        try:
            outcome = pool.get_result(timeout=5.0)
        except queue.Empty:
            print("等待任务完成超时")
            break
        
        results.append(outcome)
        if outcome.status == TaskStatus.COMPLETED:
            print(f"✓ 任务 {outcome.task_id} 完成: 结果={outcome.result}, "
                  f"耗时={outcome.execution_time:.4f}秒, 工作线程={outcome.worker_id}")
        else:
            print(f"✗ 任务 {outcome.task_id} 失败: {outcome.error}")
    
    pool.shutdown()
    
    done = [r for r in results if r.status == TaskStatus.COMPLETED]
    if done:
        avg_time = sum(r.execution_time for r in done) / len(done)
        print(f"✓ 平均任务执行时间: {avg_time:.4f}秒")

async def demonstrate_resource_management():
    """Demonstrate the async resource manager with a mock database."""
    print("\n======== 6. 资源管理 ========")
    
    class MockDatabase:
        """In-memory stand-in for a database connection."""
        
        def __init__(self, name: str):
            self.name = name
            self.connected = True
            print(f"  数据库 {name} 已连接")
        
        async def query(self, sql: str) -> List[Dict]:
            # Simulated query latency.
            await asyncio.sleep(0.01)
            return [{'id': 1, 'data': f'result for {sql}'}]
        
        async def close(self):
            self.connected = False
            print(f"  数据库 {self.name} 连接已关闭")
    
    # The manager creates the connection and awaits close() on exit.
    async with async_resource_manager(
        lambda: MockDatabase("test_db"),
        lambda db: db.close()
    ) as db:
        print("✓ 在资源管理器中使用数据库")
        rows = await db.query("SELECT * FROM users")
        print(f"✓ 查询结果: {rows}")
    
    print("✓ 资源已自动清理")

async def run_async_demonstrations():
    """Run every asyncio-based demo, in order."""
    for demo in (demonstrate_async_task_management,
                 demonstrate_async_data_processing,
                 demonstrate_resource_management):
        await demo()

def run_comprehensive_tests():
    """Run the synchronous demos, then the async ones, and print a tally."""
    print("\n======== 7. 综合测试套件 ========")
    
    sync_suite = [
        ("线程安全集合测试", demonstrate_thread_safe_collections),
        ("并发数据处理测试", demonstrate_concurrent_data_processing),
        ("工作线程池测试", demonstrate_worker_pool),
    ]
    
    passed = 0
    # Async demos count as one extra entry on top of the sync suite.
    total = len(sync_suite) + 1
    
    for test_name, test_func in sync_suite:
        try:
            test_func()
        except Exception as e:
            print(f"✗ {test_name} 失败: {e}")
            logger.exception(f"测试失败: {test_name}")
        else:
            print(f"✓ {test_name} 通过")
            passed += 1
    
    # All asyncio-based demos run inside a single event loop.
    try:
        asyncio.run(run_async_demonstrations())
    except Exception as e:
        print(f"✗ 异步功能测试 失败: {e}")
        logger.exception("异步测试失败")
    else:
        print(f"✓ 异步功能测试 通过")
        passed += 1
    
    print(f"\n测试结果: {passed}/{total} 通过")
    print("所有企业级并发集合和异步编程最佳实践演示完成!")

# Script entry point: run the full demonstration/test suite when executed directly.
if __name__ == "__main__":
    run_comprehensive_tests()