# -*- coding: utf-8 -*-
"""
Python 企业级生成器最佳实践示例

本模块演示了Python生成器的企业级最佳实践，包括:
- 生成器函数和生成器表达式
- 协程和异步生成器
- 生成器管道和数据流处理
- 内存高效的数据处理
- 生成器装饰器和上下文管理
- 错误处理和资源管理
- 性能优化和监控
"""

import asyncio
import functools
import logging
import time
import sys
from collections.abc import Iterator, Generator, AsyncGenerator
from contextlib import contextmanager, asynccontextmanager
from dataclasses import dataclass
from enum import Enum, auto
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union, Tuple
import threading
import queue
import weakref
import gc

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(levelname)s:%(name)s:%(message)s'
)
logger = logging.getLogger(__name__)

# 类型变量
T = TypeVar('T')
U = TypeVar('U')

class ProcessingStatus(Enum):
    """Lifecycle states of a DataItem as it moves through a pipeline."""
    PENDING = auto()      # created, not yet picked up
    PROCESSING = auto()   # currently being handled
    COMPLETED = auto()    # finished successfully
    FAILED = auto()       # terminated with an error

@dataclass
class DataItem:
    """A single unit of work flowing through the processing pipeline."""
    id: int                                    # unique identifier of this item
    data: Any                                  # arbitrary payload to process
    timestamp: float                           # creation time (epoch seconds)
    status: ProcessingStatus = ProcessingStatus.PENDING  # current lifecycle state
    metadata: Optional[Dict[str, Any]] = None  # optional free-form annotations

@dataclass
class ProcessingStats:
    """Aggregated counters and timings for one pipeline run."""
    total_items: int = 0       # items seen by the monitored stage
    processed_items: int = 0   # items successfully passed downstream
    failed_items: int = 0      # items that raised during processing
    start_time: float = 0.0    # epoch seconds when processing began
    end_time: float = 0.0      # epoch seconds when processing finished (0.0 = not yet)

    @property
    def success_rate(self) -> float:
        """Percentage of seen items processed successfully (0.0 when nothing seen)."""
        if not self.total_items:
            return 0.0
        return (self.processed_items / self.total_items) * 100

    @property
    def processing_time(self) -> float:
        """Elapsed seconds between start and end; 0.0 until end_time is set."""
        if self.end_time > 0:
            return self.end_time - self.start_time
        return 0.0

def fibonacci_generator(n: int) -> Generator[int, None, None]:
    """Yield the first *n* Fibonacci numbers in O(1) memory.

    Args:
        n: How many Fibonacci numbers to produce; n <= 0 yields nothing.

    Yields:
        The sequence 0, 1, 1, 2, 3, ... up to n values.
    """
    previous, current = 0, 1
    # range() handles n <= 0 naturally: the loop body never runs.
    for _ in range(n):
        yield previous
        previous, current = current, previous + current

def prime_generator(limit: int) -> Generator[int, None, None]:
    """Yield every prime strictly less than *limit*.

    Implemented as a genuine sieve of Eratosthenes over a compact
    bytearray (O(n) bytes, O(n log log n) time). The previous version
    rebuilt a candidate list per prime (quadratic work, large temporary
    lists) and, for limit == 2, incorrectly yielded 2 even though 2 is
    not < 2; both issues are fixed here.

    Args:
        limit: Exclusive upper bound for the primes generated.

    Yields:
        Primes in ascending order, each strictly less than limit.
    """
    if limit <= 2:
        return

    # flags[i] is 1 while i is still considered prime.
    flags = bytearray([1]) * limit
    flags[0] = flags[1] = 0

    for number in range(2, limit):
        if flags[number]:
            yield number
            # Cross off multiples starting at number**2: smaller multiples
            # were already removed by smaller prime factors.
            for multiple in range(number * number, limit, number):
                flags[multiple] = 0

def batch_processor(items: Iterator[T], batch_size: int = 100) -> Generator[List[T], None, None]:
    """Group a stream of items into lists of at most *batch_size* elements.

    Fix: a non-positive batch_size previously degraded silently to
    single-item batches (len(batch) >= 0 is always true after an append);
    it now raises ValueError so the misuse is caught immediately.

    Args:
        items: Input stream; consumed lazily.
        batch_size: Maximum items per emitted batch; must be >= 1.

    Yields:
        Lists of up to batch_size items; the final batch may be shorter.

    Raises:
        ValueError: If batch_size is less than 1.
    """
    if batch_size < 1:
        raise ValueError(f"batch_size must be >= 1, got {batch_size}")

    batch: List[T] = []
    for item in items:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []

    # Flush the trailing partial batch, if any.
    if batch:
        yield batch

def data_pipeline(*processors: Callable[[Iterator[T]], Iterator[U]]) -> Callable[[Iterator[T]], Iterator[U]]:
    """Compose stream processors left-to-right into a single pipeline.

    Args:
        *processors: Stage functions, each mapping an iterator to an iterator.

    Returns:
        A function that threads its input through every stage in order;
        with no stages it returns the input unchanged.
    """
    def pipeline(data: Iterator[T]) -> Iterator[U]:
        # Fold each stage over the stream: stage_n(...stage_1(data)...).
        return functools.reduce(lambda stream, stage: stage(stream), processors, data)

    return pipeline

def filter_processor(predicate: Callable[[T], bool]) -> Callable[[Iterator[T]], Iterator[T]]:
    """Build a pipeline stage that keeps only items satisfying *predicate*.

    Args:
        predicate: Returns True for items that should pass through.

    Returns:
        A stage function producing a lazy stream of the matching items.
    """
    def processor(items: Iterator[T]) -> Iterator[T]:
        # Lazy generator expression: items are tested one at a time.
        return (candidate for candidate in items if predicate(candidate))

    return processor

def map_processor(transform: Callable[[T], U]) -> Callable[[Iterator[T]], Iterator[U]]:
    """Build a pipeline stage that applies *transform* to every item.

    Args:
        transform: Function applied to each element of the stream.

    Returns:
        A stage function producing a lazy stream of transformed items.
    """
    def processor(items: Iterator[T]) -> Iterator[U]:
        # map() is lazy, matching the generator semantics of the pipeline.
        return map(transform, items)

    return processor

def monitoring_processor(name: str, stats: ProcessingStats) -> Callable[[Iterator[T]], Iterator[T]]:
    """Pass-through pipeline stage that records counts and timing into *stats*.

    Args:
        name: Label used in log messages for this stage.
        stats: Mutable ProcessingStats instance, updated in place.

    Returns:
        A stage function that yields items unchanged while updating *stats*.

    Note:
        processed_items is incremented only after the consumer resumes the
        generator past the yield, so an item abandoned mid-iteration counts
        toward total_items but not processed_items. The inner except fires
        when an exception is thrown into the generator at the yield point
        (e.g. via .throw()); it is counted as a failure and re-raised.
    """
    def processor(items: Iterator[T]) -> Iterator[T]:
        stats.start_time = time.time()
        
        try:
            for item in items:
                stats.total_items += 1
                try:
                    yield item
                    stats.processed_items += 1
                except Exception as e:
                    stats.failed_items += 1
                    logger.error(f"处理器 {name} 处理项目失败: {e}")
                    raise
        finally:
            # Runs on normal exhaustion, error, or early close() by the consumer.
            stats.end_time = time.time()
            logger.info(
                f"处理器 {name} 完成: 总计 {stats.total_items}，"
                f"成功 {stats.processed_items}，失败 {stats.failed_items}，"
                f"耗时 {stats.processing_time:.3f} 秒"
            )
    
    return processor

@contextmanager
def file_line_generator(file_path: Path, encoding: str = 'utf-8'):
    """Context manager yielding a (line_number, line) generator over a text file.

    The file handle is opened eagerly and closed in the finally block, so
    the yielded generator is only valid inside the `with` body.

    Args:
        file_path: Path of the text file to read.
        encoding: Text encoding used to decode the file.

    Yields:
        A generator of (1-based line number, line stripped of trailing
        newline/carriage-return characters).

    Raises:
        FileNotFoundError: If the file does not exist (logged, re-raised).
        UnicodeDecodeError: If decoding fails while the caller iterates —
            the error propagates into this frame at the yield point, is
            logged, and re-raised.
    """
    file_handle = None
    try:
        file_handle = open(file_path, 'r', encoding=encoding)
        
        def line_generator():
            # enumerate(..., 1) gives editor-style 1-based line numbers.
            for line_num, line in enumerate(file_handle, 1):
                yield line_num, line.rstrip('\n\r')
        
        yield line_generator()
        
    except FileNotFoundError:
        logger.error(f"文件未找到: {file_path}")
        raise
    except UnicodeDecodeError as e:
        logger.error(f"文件编码错误: {e}")
        raise
    finally:
        # Close unconditionally; skipped only when open() itself never succeeded.
        if file_handle:
            file_handle.close()
            logger.debug(f"文件已关闭: {file_path}")

def memory_efficient_reader(file_path: Path, chunk_size: int = 8192) -> Generator[str, None, None]:
    """Stream a UTF-8 text file in fixed-size chunks to bound memory use.

    Args:
        file_path: Path of the file to read.
        chunk_size: Number of characters per chunk.

    Yields:
        Consecutive chunks of the file; the final chunk may be shorter.

    Raises:
        Exception: Any open/read failure is logged and re-raised.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            # iter() with a '' sentinel stops exactly at EOF, where read()
            # returns the empty string.
            yield from iter(lambda: handle.read(chunk_size), '')
    except Exception as e:
        logger.error(f"读取文件失败 {file_path}: {e}")
        raise

async def async_data_generator(items: List[T], delay: float = 0.1) -> AsyncGenerator[T, None]:
    """Asynchronously yield each element of *items*, pausing before each one.

    Args:
        items: Elements to emit, in order.
        delay: Seconds to await before yielding each element.

    Yields:
        The elements of *items*, one per iteration of `async for`.
    """
    for element in items:
        # Sleep first so every item (including the first) is delayed.
        await asyncio.sleep(delay)
        yield element

async def async_processor(data_gen: AsyncGenerator[T, None], 
                         transform: Callable[[T], U]) -> AsyncGenerator[U, None]:
    """Apply *transform* to every item of an async stream.

    Args:
        data_gen: Upstream async generator supplying items.
        transform: Synchronous function applied to each item.

    Yields:
        Transformed items, in arrival order.

    Raises:
        Exception: Errors from *transform* (or thrown in at the yield point)
            are logged and re-raised.
    """
    async for element in data_gen:
        try:
            yield transform(element)
        except Exception as e:
            logger.error(f"异步处理失败: {e}")
            raise

def coroutine_example() -> Generator[None, str, str]:
    """Coroutine-style generator demonstrating two-way communication via send().

    Protocol:
        - Prime with next() to reach the first `yield`.
        - send(str) feeds a value; strings are upper-cased, anything else
          is stringified, and the result is accumulated.
        - send(None) breaks the loop; the joined result is returned and
          surfaces to the caller as StopIteration.value.
        - close() raises GeneratorExit at the yield, which is logged before
          the generator finishes.

    Yields:
        None — each yield is purely a receive point.

    Returns:
        The accumulated values joined with ', '.
    """
    result = []
    
    try:
        while True:
            # Suspend and wait for the next value supplied via send().
            data = yield
            if data is None:
                break
            
            # Upper-case strings; coerce anything else to str.
            processed = data.upper() if isinstance(data, str) else str(data)
            result.append(processed)
            logger.debug(f"协程处理: {data} -> {processed}")
            
    except GeneratorExit:
        logger.info("协程正常退出")
    
    return ', '.join(result)

def infinite_sequence(start: int = 0, step: int = 1) -> Generator[int, None, None]:
    """Yield the unbounded arithmetic progression start, start+step, start+2*step, ...

    Args:
        start: First value of the sequence.
        step: Increment between consecutive values.

    Yields:
        start + k * step for k = 0, 1, 2, ... (never terminates on its own).
    """
    index = 0
    while True:
        yield start + index * step
        index += 1

def take(n: int, iterable: Iterator[T]) -> Generator[T, None, None]:
    """Yield at most the first *n* elements of *iterable*.

    Fix: the previous enumerate-and-break version pulled one element
    beyond the n it yielded and silently discarded it, which corrupts
    callers that keep consuming the same iterator afterwards.
    zip(range(n), ...) stops after exactly n items because the range is
    exhausted first, so nothing extra is consumed.

    Args:
        n: Maximum number of elements to yield; n <= 0 yields nothing.
        iterable: Source iterable, consumed lazily.

    Yields:
        The first n (or fewer, if the source is shorter) elements.
    """
    # range(n) is the left zip argument, so zip stops before pulling an
    # (n+1)-th element from the source.
    for _, item in zip(range(n), iterable):
        yield item

def demonstrate_basic_generators() -> None:
    """Exercise the basic generator helpers and print their outputs."""
    print("\n======== 1. 基础生成器 ========")

    # First ten Fibonacci numbers, materialized for display.
    print(f"✓ 斐波那契数列前10项: {list(fibonacci_generator(10))}")

    # All primes below 30.
    print(f"✓ 30以内的质数: {list(prime_generator(30))}")

    # Generator expression: squares of the even numbers under ten.
    even_squares = (value ** 2 for value in range(10) if value % 2 == 0)
    print(f"✓ 偶数平方: {list(even_squares)}")

    # An infinite arithmetic sequence, truncated to its first five values.
    print(f"✓ 无限序列前5项: {list(take(5, infinite_sequence(1, 3)))}")

def demonstrate_data_pipeline() -> None:
    """Build and run a filter -> map -> monitor pipeline over 1..20."""
    print("\n======== 2. 数据管道 ========")

    run_stats = ProcessingStats()
    process = data_pipeline(
        filter_processor(lambda value: value % 2 == 0),  # keep even numbers
        map_processor(lambda value: value ** 2),         # square them
        monitoring_processor("数据管道", run_stats),       # collect statistics
    )

    # Drive the lazy pipeline to completion over 1..20.
    output = list(process(iter(range(1, 21))))
    print(f"✓ 管道处理结果: {output}")
    print(f"✓ 处理统计: 成功率 {run_stats.success_rate:.1f}%，耗时 {run_stats.processing_time:.3f}秒")

def demonstrate_batch_processing() -> None:
    """Split 1..25 into batches of five and report the totals."""
    print("\n======== 3. 批处理 ========")

    batches = batch_processor(iter(range(1, 26)), batch_size=5)

    batch_count = 0
    item_count = 0
    # enumerate numbers the batches as they stream out of the generator.
    for index, chunk in enumerate(batches, 1):
        batch_count = index
        item_count += len(chunk)
        print(f"✓ 批次 {index}: {len(chunk)} 项 - {chunk}")

    print(f"✓ 总计: {batch_count} 批次，{item_count} 项")

def demonstrate_coroutine() -> None:
    """Drive coroutine_example through its full send/finish lifecycle."""
    print("\n======== 4. 协程 ========")

    consumer = coroutine_example()
    next(consumer)  # prime the coroutine to its first yield

    for word in ["hello", "world", "python", "generator"]:
        consumer.send(word)
        print(f"✓ 发送数据: {word}")

    # Sending None makes the coroutine break out and return; the return
    # value surfaces as StopIteration.value.
    try:
        outcome = consumer.send(None)
    except StopIteration as stop:
        outcome = stop.value

    print(f"✓ 协程结果: {outcome}")

async def demonstrate_async_generators() -> None:
    """Chain an async source through an async processor and print results."""
    print("\n======== 5. 异步生成器 ========")

    # Source stream feeding the async processing stage.
    source = async_data_generator(["item1", "item2", "item3", "item4"], delay=0.1)
    processed = async_processor(source, lambda x: f"processed_{x}")

    collected = []
    async for value in processed:
        collected.append(value)
        print(f"✓ 异步处理结果: {value}")

    print(f"✓ 异步处理完成，共 {len(collected)} 项")

def demonstrate_memory_efficiency() -> None:
    """Contrast list vs. generator memory footprint and eager vs. lazy evaluation."""
    print("\n======== 6. 内存效率 ========")
    
    # Compare list vs. generator object sizes via sys.getsizeof.
    # NOTE: getsizeof is shallow — for the list it measures the list object
    # (including its pointer array) but not the contained ints.
    n = 100000
    
    # List: materializes all n squares up front.
    large_list = [x**2 for x in range(n)]
    list_size = sys.getsizeof(large_list)
    
    # Generator: constant-size object regardless of n.
    large_gen = (x**2 for x in range(n))
    gen_size = sys.getsizeof(large_gen)
    
    # Consume only a prefix of the generator.
    consumed = list(take(10, large_gen))
    
    print(f"✓ 列表对象大小: {list_size:,} 字节 (~{list_size/1024/1024:.2f} MB)")
    print(f"✓ 生成器对象大小: {gen_size:,} 字节 (~{gen_size/1024:.2f} KB)")
    print(f"✓ 生成器前10项: {consumed}")
    print(f"✓ 内存节省: {((list_size - gen_size) / list_size * 100):.1f}%")
    
    # Demonstrate lazy evaluation with a deliberately slow operation.
    print("\n--- 惰性求值演示 ---")
    def expensive_operation(x: int) -> int:
        """Simulate an expensive computation."""
        time.sleep(0.001)  # simulated compute time
        return x ** 3
    
    # List comprehension: computes all 100 values immediately.
    start_time = time.time()
    eager_list = [expensive_operation(x) for x in range(100)]
    eager_time = time.time() - start_time
    
    # Generator expression: creation does no work at all.
    start_time = time.time()
    lazy_gen = (expensive_operation(x) for x in range(100))
    lazy_creation_time = time.time() - start_time
    
    # Consume only the first five values — only those are computed.
    start_time = time.time()
    first_five = list(take(5, lazy_gen))
    lazy_consumption_time = time.time() - start_time
    
    print(f"✓ 列表推导式创建时间: {eager_time:.3f} 秒")
    print(f"✓ 生成器创建时间: {lazy_creation_time:.6f} 秒")
    print(f"✓ 生成器消费前5项时间: {lazy_consumption_time:.3f} 秒")
    print(f"✓ 时间节省: {((eager_time - lazy_consumption_time) / eager_time * 100):.1f}%")
    
    del large_list  # release the big list before returning
    gc.collect()

async def run_async_tests() -> None:
    """Entry point bundling the async demos so they share one event loop."""
    await demonstrate_async_generators()

def run_comprehensive_tests() -> None:
    """Run every synchronous demo plus the async suite, reporting pass/fail."""
    print("\n======== 7. 综合测试套件 ========")

    sync_tests = [
        ("基础生成器测试", demonstrate_basic_generators),
        ("数据管道测试", demonstrate_data_pipeline),
        ("批处理测试", demonstrate_batch_processing),
        ("协程测试", demonstrate_coroutine),
        ("内存效率测试", demonstrate_memory_efficiency),
    ]

    passed = 0
    total = 0

    for test_name, runner in sync_tests:
        total += 1
        try:
            runner()
        except Exception as e:
            print(f"✗ {test_name} 失败: {e}")
            logger.exception(f"测试失败: {test_name}")
        else:
            print(f"✓ {test_name} 通过")
            passed += 1

    # The async demos need their own event loop, hence the separate branch.
    total += 1
    try:
        asyncio.run(run_async_tests())
    except Exception as e:
        print(f"✗ 异步生成器测试 失败: {e}")
        logger.exception("异步测试失败")
    else:
        print(f"✓ 异步生成器测试 通过")
        passed += 1

    print(f"\n测试结果: {passed}/{total} 通过")
    print("所有企业级生成器最佳实践演示完成!")

# Script entry point: run the full demo/test suite.
if __name__ == "__main__":
    run_comprehensive_tests()