"""
数据流处理器

实现流式数据读取、批量处理、内存管理等功能。
"""

import asyncio
import json
import time
from dataclasses import dataclass
from typing import Dict, List, Optional, Any, Iterator, AsyncIterator, Callable

from .logger import get_module_logger
from .exceptions import ESConnectionError, create_error_from_exception
from .retry import retry_on_es_error

logger = get_module_logger(__name__)


@dataclass
class StreamConfig:
    """Configuration for streaming reads from Elasticsearch."""
    # Number of documents fetched per scroll request.
    batch_size: int = 1000
    # ES scroll context keep-alive (ES time-unit string, e.g. "5m").
    scroll_timeout: str = "5m"
    # Upper bound on concurrently running streams (used by the parallel processor).
    max_concurrent_requests: int = 4
    # Soft memory budget for the estimated size of yielded documents.
    memory_limit_mb: int = 512
    # Fraction of memory_limit_mb at which back-pressure throttling kicks in.
    backpressure_threshold: float = 0.8
    # Retry attempts for the initial scroll search (see _execute_scroll_search).
    retry_attempts: int = 3


class DataStreamProcessor:
    """Streaming processor for Elasticsearch documents.

    Wraps the ES scroll API to read documents lazily, one batch at a time,
    with a simple memory-usage estimate and back-pressure throttling.
    Can be used as a context manager so scroll contexts are always released.
    """

    def __init__(self, es_client, config: Optional[StreamConfig] = None):
        """Initialize the stream processor.

        Args:
            es_client: ES client; must expose ``search``, ``scroll`` and
                ``clear_scroll``.
            config: Stream configuration; defaults to ``StreamConfig()``.
        """
        self.es_client = es_client
        self.config = config or StreamConfig()
        self._active_scrolls = set()   # scroll ids opened but not yet cleared
        self._memory_usage = 0         # estimated bytes of documents yielded so far
        self._processed_count = 0
        self._start_time: Optional[float] = None

        logger.info(f"数据流处理器初始化，批量大小: {self.config.batch_size}")

    def stream_documents(
        self,
        index: str,
        query: Dict[str, Any],
        source_fields: Optional[List[str]] = None,
        sort_fields: Optional[List[Dict[str, Any]]] = None
    ) -> Iterator[Dict[str, Any]]:
        """Stream documents matching *query* from *index* via the scroll API.

        Args:
            index: Index name.
            query: ES query clause.
            source_fields: Optional list of ``_source`` fields to return.
            sort_fields: Optional sort spec; defaults to ``_doc`` for speed.

        Yields:
            Processed document dicts (see ``_process_document``).

        Raises:
            Whatever ``create_error_from_exception`` maps the underlying
            failure to, when the initial search or processing fails.
        """
        logger.info(f"开始流式读取文档: {index}")
        self._start_time = time.time()
        self._processed_count = 0

        # Pre-bind so the finally-block never hits an unbound name when the
        # initial search itself raises (bug fix: NameError masked real error).
        scroll_id: Optional[str] = None

        try:
            # Build the scroll query body.
            scroll_query = self._build_scroll_query(query, source_fields, sort_fields)

            # Open the scroll context with the first search.
            response = self._execute_scroll_search(index, scroll_query)
            scroll_id = response.get('_scroll_id')

            if scroll_id:
                self._active_scrolls.add(scroll_id)

            hits = response['hits']['hits']
            # ES 7+ reports total as {"value": N, "relation": ...}; older
            # versions return a bare integer — accept both shapes.
            total = response['hits']['total']
            total_docs = total['value'] if isinstance(total, dict) else total

            logger.info(f"找到 {total_docs} 个文档，开始流式处理")

            while hits:
                # Throttle if the estimated memory budget is nearly exhausted.
                self._check_memory_usage()

                # Yield the current batch.
                for hit in hits:
                    yield self._process_document(hit)
                    self._processed_count += 1

                # Log progress every 10 full batches.
                if self._processed_count % (self.config.batch_size * 10) == 0:
                    self._log_progress(total_docs)

                # Fetch the next batch.
                if scroll_id:
                    try:
                        response = self.es_client.scroll(scroll_id, self.config.scroll_timeout)
                        hits = response['hits']['hits']
                    except Exception as e:
                        logger.error(f"滚动查询失败: {e}")
                        break
                else:
                    break

            logger.info(f"流式读取完成，共处理 {self._processed_count} 个文档")

        except Exception as e:
            logger.error(f"流式读取失败: {e}")
            raise create_error_from_exception(e, f"流式读取失败: {index}")

        finally:
            # Always release the scroll context, even on early generator close.
            if scroll_id and scroll_id in self._active_scrolls:
                self._cleanup_scroll(scroll_id)

    def stream_batches(
        self,
        index: str,
        query: Dict[str, Any],
        source_fields: Optional[List[str]] = None,
        sort_fields: Optional[List[Dict[str, Any]]] = None
    ) -> Iterator[List[Dict[str, Any]]]:
        """Stream documents grouped into lists of ``config.batch_size``.

        Args:
            index: Index name.
            query: ES query clause.
            source_fields: Optional list of ``_source`` fields to return.
            sort_fields: Optional sort spec.

        Yields:
            Lists of processed documents; the last list may be shorter.
        """
        batch: List[Dict[str, Any]] = []

        for document in self.stream_documents(index, query, source_fields, sort_fields):
            batch.append(document)

            if len(batch) >= self.config.batch_size:
                yield batch
                batch = []

        # Flush the trailing partial batch.
        if batch:
            yield batch

    async def async_stream_documents(
        self,
        index: str,
        query: Dict[str, Any],
        source_fields: Optional[List[str]] = None,
        sort_fields: Optional[List[Dict[str, Any]]] = None
    ) -> AsyncIterator[Dict[str, Any]]:
        """Asynchronously stream documents without blocking the event loop.

        Each ``next()`` on the underlying synchronous generator runs in the
        default executor, so documents are handed over one at a time instead
        of materializing the whole result set up front (which the previous
        implementation did, defeating streaming and the memory limit).

        Args:
            index: Index name.
            query: ES query clause.
            source_fields: Optional list of ``_source`` fields to return.
            sort_fields: Optional sort spec.

        Yields:
            Processed document dicts.
        """
        loop = asyncio.get_running_loop()
        iterator = self.stream_documents(index, query, source_fields, sort_fields)
        sentinel = object()  # unique end-of-stream marker

        try:
            while True:
                # next(iterator, sentinel) in a worker thread: the blocking
                # ES round-trip never stalls the event loop.
                item = await loop.run_in_executor(None, next, iterator, sentinel)
                if item is sentinel:
                    break
                yield item
        finally:
            # Close the generator so its finally-block clears the scroll
            # context even when the consumer abandons this async iterator.
            iterator.close()

    def process_stream_with_callback(
        self,
        index: str,
        query: Dict[str, Any],
        callback: Callable[[Dict[str, Any]], None],
        source_fields: Optional[List[str]] = None,
        error_handler: Optional[Callable[[Exception, Dict[str, Any]], None]] = None
    ) -> Dict[str, Any]:
        """Process a document stream through a callback.

        Args:
            index: Index name.
            query: ES query clause.
            callback: Called once per document.
            source_fields: Optional list of ``_source`` fields to return.
            error_handler: Called as ``error_handler(exc, document)`` when
                the callback raises; if absent the error is only logged.

        Returns:
            Stats dict with ``processed_count``, ``error_count``,
            ``start_time``, ``end_time`` and ``duration`` (seconds).
        """
        stats = {
            'processed_count': 0,
            'error_count': 0,
            'start_time': time.time(),
            'end_time': None,
            'duration': 0
        }

        try:
            for document in self.stream_documents(index, query, source_fields):
                try:
                    callback(document)
                    stats['processed_count'] += 1
                except Exception as e:
                    # Per-document failures don't abort the stream.
                    stats['error_count'] += 1
                    if error_handler:
                        error_handler(e, document)
                    else:
                        logger.error(f"处理文档时出错: {e}")

        except Exception as e:
            logger.error(f"流处理失败: {e}")
            raise

        finally:
            stats['end_time'] = time.time()
            stats['duration'] = stats['end_time'] - stats['start_time']

        return stats

    def _build_scroll_query(
        self,
        query: Dict[str, Any],
        source_fields: Optional[List[str]] = None,
        sort_fields: Optional[List[Dict[str, Any]]] = None
    ) -> Dict[str, Any]:
        """Build the scroll search body.

        Args:
            query: Base query clause.
            source_fields: Optional ``_source`` filter.
            sort_fields: Optional sort spec.

        Returns:
            Search body dict for the scroll request.
        """
        scroll_query: Dict[str, Any] = {
            "size": self.config.batch_size,
            "query": query
        }

        if source_fields:
            scroll_query["_source"] = source_fields

        if sort_fields:
            scroll_query["sort"] = sort_fields
        else:
            # _doc order is the cheapest sort for scrolling.
            scroll_query["sort"] = ["_doc"]

        return scroll_query

    @retry_on_es_error(max_attempts=3)
    def _execute_scroll_search(self, index: str, query: Dict[str, Any]) -> Dict[str, Any]:
        """Execute the initial scroll search (retried on ES errors).

        Args:
            index: Index name.
            query: Search body from ``_build_scroll_query``.

        Returns:
            Raw ES search response.
        """
        return self.es_client.search(
            index=index,
            body=query,
            scroll=self.config.scroll_timeout
        )

    def _process_document(self, hit: Dict[str, Any]) -> Dict[str, Any]:
        """Convert one ES hit into the document shape this module yields.

        Args:
            hit: Raw hit object from an ES response.

        Returns:
            Dict with ``_id``, ``_index``, ``_source`` and, when present,
            ``_score`` and ``_sort``.
        """
        document = {
            '_id': hit['_id'],
            '_index': hit['_index'],
            '_source': hit['_source']
        }

        # Carry over optional metadata when present.
        if '_score' in hit:
            document['_score'] = hit['_score']

        if 'sort' in hit:
            document['_sort'] = hit['sort']

        # Track estimated memory for back-pressure decisions.
        self._update_memory_usage(document)

        return document

    def _update_memory_usage(self, document: Dict[str, Any]) -> None:
        """Add *document*'s estimated size to the running memory counter.

        The estimate is the UTF-8 length of the JSON serialization — crude,
        but cheap and proportional to actual payload size.

        Args:
            document: Processed document dict.
        """
        doc_size = len(json.dumps(document, ensure_ascii=False).encode('utf-8'))
        self._memory_usage += doc_size

    def _check_memory_usage(self) -> None:
        """Throttle briefly when the estimated memory crosses the threshold."""
        memory_limit_bytes = self.config.memory_limit_mb * 1024 * 1024
        usage_ratio = self._memory_usage / memory_limit_bytes

        if usage_ratio > self.config.backpressure_threshold:
            logger.warning(f"内存使用率达到 {usage_ratio:.1%}，触发背压控制")
            # Simple back-pressure: pause briefly to let consumers catch up.
            time.sleep(0.1)

            # Reset the counter (simplified accounting — documents already
            # yielded may have been released by the consumer).
            self._memory_usage = 0

    def _log_progress(self, total_docs: int) -> None:
        """Log processing progress and throughput.

        Args:
            total_docs: Total number of matching documents.
        """
        if self._start_time:
            elapsed = time.time() - self._start_time
            rate = self._processed_count / elapsed if elapsed > 0 else 0
            progress = (self._processed_count / total_docs * 100) if total_docs > 0 else 0

            logger.info(
                f"处理进度: {self._processed_count}/{total_docs} ({progress:.1f}%), "
                f"速度: {rate:.1f} docs/sec"
            )

    def _cleanup_scroll(self, scroll_id: str) -> None:
        """Clear one scroll context on the ES side (best-effort).

        Args:
            scroll_id: Scroll id to clear.
        """
        try:
            self.es_client.clear_scroll(scroll_id)
            self._active_scrolls.discard(scroll_id)
            logger.debug(f"清理滚动上下文: {scroll_id}")
        except Exception as e:
            # Failing to clear a scroll is non-fatal; it expires on its own.
            logger.warning(f"清理滚动上下文失败: {e}")

    def get_stream_stats(self) -> Dict[str, Any]:
        """Return current streaming statistics.

        Returns:
            Dict with processed count, elapsed time, rate, estimated memory
            usage, active scroll count and key config values.
        """
        current_time = time.time()
        elapsed = current_time - self._start_time if self._start_time else 0

        return {
            'processed_count': self._processed_count,
            'elapsed_time': elapsed,
            'processing_rate': self._processed_count / elapsed if elapsed > 0 else 0,
            'memory_usage_mb': self._memory_usage / (1024 * 1024),
            'active_scrolls': len(self._active_scrolls),
            'config': {
                'batch_size': self.config.batch_size,
                'memory_limit_mb': self.config.memory_limit_mb,
                'scroll_timeout': self.config.scroll_timeout
            }
        }

    def cleanup(self) -> None:
        """Release all remaining scroll contexts."""
        for scroll_id in list(self._active_scrolls):
            self._cleanup_scroll(scroll_id)

        logger.info("数据流处理器资源清理完成")

    def __enter__(self):
        """Context-manager entry: returns self."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: always runs ``cleanup``."""
        self.cleanup()


class ParallelDataStreamProcessor:
    """Parallel data-stream processor.

    Fans out several ES document streams concurrently, bounded by
    ``StreamConfig.max_concurrent_requests``.
    """

    def __init__(self, es_client, config: Optional[StreamConfig] = None):
        """Initialize the parallel processor.

        Args:
            es_client: ES client shared by every child processor.
            config: Stream configuration; defaults to ``StreamConfig()``.
        """
        self.es_client = es_client
        self.config = config or StreamConfig()
        self.processors: List[DataStreamProcessor] = []

    async def process_multiple_streams(
        self,
        stream_configs: List[Dict[str, Any]],
        callback: Callable[[str, Dict[str, Any]], None]
    ) -> Dict[str, Any]:
        """Process multiple document streams in parallel.

        Args:
            stream_configs: One dict per stream containing ``index`` and
                ``query``, plus optional ``id``, ``source_fields`` and
                ``sort_fields``.
            callback: Invoked as ``callback(stream_id, document)`` for every
                streamed document.

        Returns:
            Aggregate statistics across all child processors.

        Raises:
            The first exception raised by any stream. Sibling tasks are
            cancelled and awaited before re-raising, so no task is left
            running in the background (fix: ``gather`` alone leaves
            unfinished tasks dangling on failure).
        """
        semaphore = asyncio.Semaphore(self.config.max_concurrent_requests)

        async def process_single_stream(stream_id: str, stream_config: Dict[str, Any]) -> None:
            # One processor per stream; the semaphore caps how many hit ES
            # at the same time.
            async with semaphore:
                processor = DataStreamProcessor(self.es_client, self.config)
                self.processors.append(processor)

                try:
                    async for document in processor.async_stream_documents(
                        stream_config['index'],
                        stream_config['query'],
                        stream_config.get('source_fields'),
                        stream_config.get('sort_fields')
                    ):
                        callback(stream_id, document)
                finally:
                    processor.cleanup()

        # Launch one task per configured stream.
        tasks = []
        for i, stream_config in enumerate(stream_configs):
            stream_id = stream_config.get('id', f'stream_{i}')
            tasks.append(asyncio.create_task(process_single_stream(stream_id, stream_config)))

        try:
            await asyncio.gather(*tasks)
        except Exception as e:
            # Cancel the streams still running and wait for them to settle
            # before propagating the failure.
            for task in tasks:
                if not task.done():
                    task.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)
            logger.error(f"并行流处理失败: {e}")
            raise

        # Aggregate per-processor statistics.
        return {
            'total_streams': len(stream_configs),
            'total_processed': sum(p.get_stream_stats()['processed_count'] for p in self.processors),
            'processors': [p.get_stream_stats() for p in self.processors]
        }

    def cleanup(self) -> None:
        """Release resources of every child processor and forget them."""
        for processor in self.processors:
            processor.cleanup()
        self.processors.clear()
