"""
InfluxDB Batch Writer Service

This service provides efficient batch writing capabilities for InfluxDB with:
- Automatic batching and buffering
- Retry logic with exponential backoff
- Performance monitoring and metrics
- Error handling and logging

Example:
    Basic usage:
    
    >>> from app.services.influxdb_writer_service import InfluxDBWriterService
    >>> from app.adapters.core import Bar
    >>> 
    >>> writer = InfluxDBWriterService(
    ...     adapter=influxdb_adapter,
    ...     batch_size=1000,
    ...     flush_interval=5.0
    ... )
    >>> await writer.start()
    >>> 
    >>> # Write data points
    >>> bars = [...]  # List of Bar objects
    >>> await writer.write_bars(bars)
    >>> 
    >>> await writer.stop()
"""

import asyncio
import logging
from typing import List, Optional, Dict, Any
from datetime import datetime
from dataclasses import dataclass, field
from collections import deque

from ...adapters.core import Bar, Tick, Orderbook
from ...adapters.core.exceptions import AdapterError, is_retryable_error, get_retry_delay

# 导入监控工具
try:
    from backend.redfire_framework.monitoring import (
        increment, set_gauge, observe_histogram, timer
    )
    MONITORING_AVAILABLE = True
except ImportError:
    MONITORING_AVAILABLE = False
    
    # 定义占位函数
    def increment(name: str, **kwargs): pass
    def set_gauge(name: str, value: float, **kwargs): pass
    def observe_histogram(name: str, value: float, **kwargs): pass
    def timer(name: str): return nullcontext()
    
    from contextlib import nullcontext

logger = logging.getLogger(__name__)


@dataclass
class WriterMetrics:
    """Cumulative performance counters for the writer service.

    A single mutable instance lives on InfluxDBWriterService.metrics and is
    updated in place as writes happen.
    """
    # NOTE(review): never incremented anywhere in this module — candidate
    # for wiring up or removal; verify against external consumers.
    total_writes: int = 0
    successful_writes: int = 0  # batches written to InfluxDB successfully
    failed_writes: int = 0  # failed batches plus points that failed conversion
    total_points: int = 0  # points accepted into the buffer over the lifetime
    buffered_points: int = 0  # current buffer length (refreshed by get_metrics)
    batch_count: int = 0  # successfully written batches (mirrors successful_writes)
    retry_count: int = 0  # total failed write attempts (including final failures)
    last_flush_time: Optional[datetime] = None  # naive-UTC time of last flush
    last_error: Optional[str] = None  # message of the most recent write exception
    

class InfluxDBWriterService:
    """
    High-performance batch writer for InfluxDB.
    
    Features:
    - Automatic batching based on size or time
    - Asynchronous writing with queue
    - Retry logic for failed writes
    - Performance metrics and monitoring
    - Graceful shutdown with flush
    
    Attributes:
        adapter: InfluxDB adapter instance
        batch_size: Maximum points per batch
        flush_interval: Maximum seconds before auto-flush
        max_retries: Maximum retry attempts for failed writes
        buffer: Internal buffer for data points
        metrics: Performance metrics
    """
    
    def __init__(
        self,
        adapter: Any,  # InfluxDB adapter (duck-typed: must expose async write_batch())
        batch_size: int = 1000,
        flush_interval: float = 5.0,
        max_retries: int = 3,
        max_buffer_size: int = 10000,
    ):
        """
        Initialize the writer service.
        
        Args:
            adapter: InfluxDB adapter instance
            batch_size: Maximum points per batch (default: 1000)
            flush_interval: Auto-flush interval in seconds (default: 5.0)
            max_retries: Maximum retry attempts (default: 3)
            max_buffer_size: Maximum buffer size (default: 10000)
        """
        self.adapter = adapter
        self.batch_size = batch_size
        self.flush_interval = flush_interval
        self.max_retries = max_retries
        self.max_buffer_size = max_buffer_size
        
        # Internal state
        self.buffer: deque = deque()
        self.metrics = WriterMetrics()
        self._flush_task: Optional[asyncio.Task] = None
        self._worker_task: Optional[asyncio.Task] = None
        self._shutdown_event = asyncio.Event()
        self._flush_lock = asyncio.Lock()
        self._running = False
        # Strong references to fire-and-forget flush tasks.  The event loop
        # keeps only weak references to tasks, so an unreferenced task may be
        # garbage-collected before it ever runs.
        self._pending_flushes: set = set()
        
        logger.info(
            f"InfluxDB Writer Service initialized: "
            f"batch_size={batch_size}, flush_interval={flush_interval}s, "
            f"max_retries={max_retries}, max_buffer_size={max_buffer_size}"
        )
    
    async def start(self) -> None:
        """
        Start the writer service background tasks.
        
        This starts:
        - Auto-flush task (periodic flush based on interval)
        - Worker task (processes buffered data)
        
        Idempotent: calling start() while already running only logs a warning.
        """
        if self._running:
            logger.warning("Writer service is already running")
            return
        
        self._running = True
        self._shutdown_event.clear()
        
        # Start background tasks
        self._flush_task = asyncio.create_task(self._auto_flush_loop())
        self._worker_task = asyncio.create_task(self._worker_loop())
        
        logger.info("InfluxDB Writer Service started")
        
        # Record metric
        if MONITORING_AVAILABLE:
            increment("influxdb_writer_starts_total")
    
    async def stop(self, timeout: float = 30.0) -> None:
        """
        Stop the writer service and flush remaining data.
        
        Args:
            timeout: Maximum time to wait for the final flush (seconds).
                On timeout an error is logged; buffered points may be lost.
        """
        if not self._running:
            logger.warning("Writer service is not running")
            return
        
        logger.info("Stopping InfluxDB Writer Service...")
        
        # Signal shutdown
        self._shutdown_event.set()
        self._running = False
        
        # Cancel background tasks
        if self._flush_task:
            self._flush_task.cancel()
            try:
                await self._flush_task
            except asyncio.CancelledError:
                pass
        
        if self._worker_task:
            self._worker_task.cancel()
            try:
                await self._worker_task
            except asyncio.CancelledError:
                pass
        
        # Final flush (serializes behind any in-flight flush via _flush_lock)
        try:
            await asyncio.wait_for(self.flush(), timeout=timeout)
        except asyncio.TimeoutError:
            logger.error(f"Flush timeout after {timeout}s, some data may be lost")
        
        logger.info(
            f"InfluxDB Writer Service stopped. "
            f"Final metrics: {self.metrics.successful_writes} successful writes, "
            f"{self.metrics.failed_writes} failed writes"
        )
        
        # Record metric
        if MONITORING_AVAILABLE:
            increment("influxdb_writer_stops_total")
    
    async def cleanup(self) -> None:
        """
        Cleanup method for compatibility with service lifecycle.
        This is an alias for stop() method.
        """
        await self.stop()
    
    async def health_check(self) -> bool:
        """
        Check the health of the writer service.
        
        Returns:
            True if service is healthy, False otherwise
        """
        try:
            # Check if service is running
            if not self._running:
                logger.warning("Writer service is not running")
                return False
            
            # Check if adapter is available and connected
            if not self.adapter:
                logger.error("InfluxDB adapter is not available")
                return False
            
            # Check adapter health if available
            if hasattr(self.adapter, 'health_check'):
                adapter_healthy = await self.adapter.health_check()
                if not adapter_healthy:
                    logger.warning("InfluxDB adapter health check failed")
                    return False
            
            # Check buffer size (warn if nearly full; still reported healthy)
            if self.metrics.buffered_points > self.max_buffer_size * 0.9:
                logger.warning(
                    f"Buffer is nearly full: {self.metrics.buffered_points}/{self.max_buffer_size}"
                )
            
            return True
            
        except Exception as e:
            logger.error(f"Health check failed: {e}", exc_info=True)
            return False
    
    async def write_bars(self, bars: List[Bar], measurement: str = "bars") -> None:
        """
        Write K-line bars to buffer.
        
        Bars that fail conversion are skipped and counted as failed writes.
        
        Args:
            bars: List of Bar objects
            measurement: InfluxDB measurement name (default: "bars")
        """
        if not bars:
            return
        
        # Convert bars to InfluxDB points
        points = []
        for bar in bars:
            try:
                point = bar.to_influx_point()
                point['measurement'] = measurement
                points.append(point)
            except Exception as e:
                logger.error(f"Failed to convert bar to point: {e}", exc_info=True)
                self.metrics.failed_writes += 1
                continue
        
        # Add to buffer
        await self._add_to_buffer(points)
    
    async def write_ticks(self, ticks: List[Tick], measurement: str = "ticks") -> None:
        """
        Write tick data to buffer.
        
        Ticks that fail conversion are skipped and counted as failed writes.
        
        Args:
            ticks: List of Tick objects
            measurement: InfluxDB measurement name (default: "ticks")
        """
        if not ticks:
            return
        
        # Convert ticks to InfluxDB points
        points = []
        for tick in ticks:
            try:
                point = tick.to_influx_point()
                point['measurement'] = measurement
                points.append(point)
            except Exception as e:
                logger.error(f"Failed to convert tick to point: {e}", exc_info=True)
                self.metrics.failed_writes += 1
                continue
        
        # Add to buffer
        await self._add_to_buffer(points)
    
    async def write_orderbooks(
        self, 
        orderbooks: List[Orderbook], 
        measurement: str = "orderbooks"
    ) -> None:
        """
        Write orderbook data to buffer.
        
        Orderbooks that fail conversion are skipped and counted as failed writes.
        
        Args:
            orderbooks: List of Orderbook objects
            measurement: InfluxDB measurement name (default: "orderbooks")
        """
        if not orderbooks:
            return
        
        # Convert orderbooks to InfluxDB points
        points = []
        for orderbook in orderbooks:
            try:
                point = orderbook.to_influx_point()
                point['measurement'] = measurement
                points.append(point)
            except Exception as e:
                logger.error(f"Failed to convert orderbook to point: {e}", exc_info=True)
                self.metrics.failed_writes += 1
                continue
        
        # Add to buffer
        await self._add_to_buffer(points)
    
    async def write_points(self, points: List[Dict[str, Any]]) -> None:
        """
        Write raw InfluxDB points to buffer.
        
        Args:
            points: List of InfluxDB point dictionaries
        """
        if not points:
            return
        
        await self._add_to_buffer(points)
    
    async def flush(self) -> None:
        """
        Flush all buffered data to InfluxDB.
        
        This method is safe to call concurrently (flushes are serialized
        through an internal lock).  Points that still fail after all retries
        are dropped — see _write_batch_with_retry.
        """
        async with self._flush_lock:
            if not self.buffer:
                return
            
            # Snapshot and clear the buffer atomically (no await in between)
            points = list(self.buffer)
            self.buffer.clear()
            
            logger.info(f"Flushing {len(points)} points to InfluxDB...")
            
            # Write in batches
            await self._write_batches(points)
            
            # Update metrics.  New points may have been buffered while the
            # write above was awaited, so report the actual buffer length
            # instead of assuming the buffer is empty.
            # NOTE: naive-UTC timestamp kept for backward compatibility.
            self.metrics.last_flush_time = datetime.utcnow()
            self.metrics.buffered_points = len(self.buffer)
            
            # Record metrics
            if MONITORING_AVAILABLE:
                set_gauge("influxdb_writer_buffer_size", len(self.buffer))
    
    def get_metrics(self) -> WriterMetrics:
        """
        Get current service metrics.
        
        Returns:
            WriterMetrics object with current statistics (the live instance,
            with buffered_points refreshed to the current buffer length)
        """
        self.metrics.buffered_points = len(self.buffer)
        return self.metrics
    
    async def _add_to_buffer(self, points: List[Dict[str, Any]]) -> None:
        """
        Add points to buffer and trigger flush if needed.
        
        Args:
            points: List of InfluxDB point dictionaries
        """
        # Check buffer size limit
        if len(self.buffer) + len(points) > self.max_buffer_size:
            logger.warning(
                f"Buffer size limit reached ({self.max_buffer_size}), "
                f"triggering immediate flush"
            )
            await self.flush()
        
        # Add to buffer
        self.buffer.extend(points)
        self.metrics.buffered_points = len(self.buffer)
        self.metrics.total_points += len(points)
        
        # Record metrics
        if MONITORING_AVAILABLE:
            increment("influxdb_writer_points_added_total", value=len(points))
            set_gauge("influxdb_writer_buffer_size", len(self.buffer))
        
        # Trigger an asynchronous flush once the batch size is reached.
        # Keep a strong reference to the task: the event loop holds only weak
        # references, so an unreferenced fire-and-forget task may be
        # garbage-collected before it runs.
        if len(self.buffer) >= self.batch_size:
            task = asyncio.create_task(self.flush())
            self._pending_flushes.add(task)
            task.add_done_callback(self._pending_flushes.discard)
    
    async def _write_batches(self, points: List[Dict[str, Any]]) -> None:
        """
        Write points to InfluxDB in batches with retry logic.
        
        Args:
            points: List of InfluxDB point dictionaries
        """
        # Split into batches of at most batch_size points
        batches = [
            points[i:i + self.batch_size]
            for i in range(0, len(points), self.batch_size)
        ]
        
        logger.debug(f"Writing {len(batches)} batches to InfluxDB...")
        
        for batch_idx, batch in enumerate(batches):
            success = await self._write_batch_with_retry(batch, batch_idx)
            
            if success:
                self.metrics.successful_writes += 1
                self.metrics.batch_count += 1
            else:
                self.metrics.failed_writes += 1
    
    async def _write_batch_with_retry(
        self, 
        batch: List[Dict[str, Any]], 
        batch_idx: int
    ) -> bool:
        """
        Write a single batch with retry logic.
        
        Retries only errors classified as retryable by is_retryable_error,
        waiting get_retry_delay(attempt) between attempts.  A batch that
        still fails after max_retries attempts is dropped.
        
        Args:
            batch: List of points for this batch
            batch_idx: Batch index for logging
            
        Returns:
            True if successful, False otherwise
        """
        for attempt in range(self.max_retries + 1):
            try:
                # Measure write time with the loop's monotonic clock so the
                # duration is immune to wall-clock adjustments.
                loop = asyncio.get_running_loop()
                start_time = loop.time()
                
                # Write to InfluxDB
                await self.adapter.write_batch(batch)
                
                # Calculate duration
                duration = loop.time() - start_time
                
                logger.debug(
                    f"Batch {batch_idx} written successfully "
                    f"({len(batch)} points in {duration:.3f}s)"
                )
                
                # Record metrics
                if MONITORING_AVAILABLE:
                    increment("influxdb_writer_batches_success_total")
                    observe_histogram("influxdb_writer_batch_duration_seconds", duration)
                    observe_histogram("influxdb_writer_batch_size", len(batch))
                
                return True
                
            except Exception as e:
                self.metrics.last_error = str(e)
                self.metrics.retry_count += 1
                
                # Full traceback only on the last attempt to keep logs lean
                logger.error(
                    f"Batch {batch_idx} write failed (attempt {attempt + 1}/{self.max_retries + 1}): {e}",
                    exc_info=attempt == self.max_retries
                )
                
                # Record metrics
                if MONITORING_AVAILABLE:
                    increment("influxdb_writer_errors_total")
                
                # Check if we should retry
                if attempt < self.max_retries and is_retryable_error(e):
                    delay = get_retry_delay(attempt)
                    logger.info(f"Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                else:
                    # Record permanent failure
                    if MONITORING_AVAILABLE:
                        increment("influxdb_writer_batches_failed_total")
                    return False
        
        return False
    
    async def _auto_flush_loop(self) -> None:
        """
        Background task that periodically flushes the buffer.
        
        Wakes up every flush_interval seconds (or immediately on shutdown)
        and flushes if there is buffered data.
        """
        logger.info(f"Auto-flush loop started (interval={self.flush_interval}s)")
        
        while not self._shutdown_event.is_set():
            try:
                # Wait for flush interval or shutdown
                await asyncio.wait_for(
                    self._shutdown_event.wait(),
                    timeout=self.flush_interval
                )
                # If we get here, shutdown was signaled
                break
                
            except asyncio.TimeoutError:
                # Timeout means it's time to flush
                pass
            
            # Flush if there's data
            if self.buffer:
                try:
                    await self.flush()
                except Exception as e:
                    logger.error(f"Auto-flush failed: {e}", exc_info=True)
        
        logger.info("Auto-flush loop stopped")
    
    async def _worker_loop(self) -> None:
        """
        Background worker task (for future use).
        
        Currently not used, but reserved for future enhancements like:
        - Background data compression
        - Data aggregation
        - Queue-based processing
        """
        logger.debug("Worker loop started")
        
        while not self._shutdown_event.is_set():
            await asyncio.sleep(1)
        
        logger.debug("Worker loop stopped")


# Optional process-wide singleton; read via get_writer(), set via set_writer().
_writer_instance: Optional[InfluxDBWriterService] = None


def get_writer() -> Optional[InfluxDBWriterService]:
    """Return the process-wide writer singleton.

    Returns:
        The InfluxDBWriterService registered via set_writer(), or None if
        no instance has been registered yet.
    """
    return _writer_instance


def set_writer(writer: InfluxDBWriterService) -> None:
    """Register *writer* as the process-wide singleton.

    Subsequent calls to get_writer() return this instance.

    Args:
        writer: InfluxDBWriterService instance to register
    """
    global _writer_instance
    _writer_instance = writer


async def create_writer_service(
    adapter: Any,
    batch_size: int = 1000,
    flush_interval: float = 5.0,
    max_retries: int = 3,
    auto_start: bool = True,
    max_buffer_size: int = 10000,
) -> InfluxDBWriterService:
    """
    Create and optionally start a writer service.
    
    Args:
        adapter: InfluxDB adapter instance
        batch_size: Maximum points per batch
        flush_interval: Auto-flush interval in seconds
        max_retries: Maximum retry attempts
        auto_start: Automatically start the service
        max_buffer_size: Maximum buffer size before a forced flush
            (previously not exposed by this factory; default matches
            InfluxDBWriterService)
        
    Returns:
        InfluxDBWriterService instance (already started when auto_start=True)
    """
    writer = InfluxDBWriterService(
        adapter=adapter,
        batch_size=batch_size,
        flush_interval=flush_interval,
        max_retries=max_retries,
        max_buffer_size=max_buffer_size,
    )
    
    if auto_start:
        await writer.start()
    
    return writer

