"""BLE Large Data Handler for efficient big data transfers.

Handles data chunking, flow control, buffer management,
and optimization for transferring large files over BLE.
"""

import asyncio
import hashlib
import zlib
import time
import logging
from enum import Enum
from typing import Dict, List, Optional, Callable, Any, Tuple
from dataclasses import dataclass, field
from collections import OrderedDict, deque
import gc
import os

from .fragmentation import FragmentationProtocol, TransferState

logger = logging.getLogger(__name__)


# Tunable constants for chunking, compression, and buffering.
CHUNK_SIZE = 4096  # Bytes of original payload per chunk (4KB)
MAX_CHUNKS_IN_MEMORY = 10  # Intended cap on in-memory chunks (NOTE(review): not referenced in this module — confirm before relying on it)
COMPRESSION_THRESHOLD = 1024  # Only chunks of at least this many bytes (1KB) are considered for compression
COMPRESSION_LEVEL = 6  # zlib compression level (1=fastest .. 9=smallest)
RING_BUFFER_SIZE = 64 * 1024  # Capacity of BufferManager's streaming ring buffer (64KB)
HIGH_WATER_MARK = 0.8  # Trigger pressure callback when ring buffer is over 80% full
LOW_WATER_MARK = 0.2  # Debug-log when ring buffer drains below 20% full


class CongestionState(Enum):
    """States of the TCP-style congestion-control state machine."""

    SLOW_START = 1            # window grows by 1 per ACK (exponential per RTT)
    CONGESTION_AVOIDANCE = 2  # window grows ~1 per window of ACKs (linear)
    FAST_RECOVERY = 3         # halved window after detected loss
    TIMEOUT = 4               # window collapsed to minimum after a timeout


class TransferPriority(Enum):
    """Transfer priority levels (lower value = served first)."""

    HIGH = 0
    NORMAL = 1
    LOW = 2


@dataclass
class ChunkMetadata:
    """Metadata describing one chunk of a larger transfer."""
    chunk_id: int  # sequential index of this chunk within the transfer
    offset: int    # byte offset of this chunk in the original payload
    size: int      # on-the-wire size (after optional compression)
    checksum: str  # truncated SHA-256 hex digest of the uncompressed chunk
    compressed: bool = False  # True when the payload was zlib-compressed
    original_size: int = 0    # uncompressed size in bytes
    timestamp: float = field(default_factory=time.time)  # creation time


@dataclass
class TransferSession:
    """Transfer session information.

    Tracks the progress of a single logical transfer (in-memory payload
    or a file), including which chunks completed and how many bytes of
    the original payload have been delivered so far.
    """
    session_id: str           # unique identifier for this transfer
    file_path: Optional[str]  # source file path, or None for in-memory data
    total_size: int           # total payload size in bytes
    transferred: int = 0      # bytes of original payload delivered so far
    chunks_completed: List[int] = field(default_factory=list)  # ids of sent chunks
    chunks_total: int = 0     # total number of chunks in this transfer
    timestamp: float = field(default_factory=time.time)  # session creation time
    checksum: Optional[str] = None  # optional whole-payload checksum
    priority: TransferPriority = TransferPriority.NORMAL
    compression_enabled: bool = True
    
    @property
    def progress(self) -> float:
        """Get transfer progress as a percentage (0.0-100.0)."""
        if self.total_size > 0:
            return (self.transferred / self.total_size) * 100
        # Fixed: return a float (was the int `0`) to honour the annotation.
        return 0.0
    
    @property
    def is_complete(self) -> bool:
        """Check if transfer is complete (all payload bytes delivered)."""
        return self.transferred >= self.total_size


@dataclass
class PerformanceMetrics:
    """Transfer performance metrics.

    Counters are incremented externally as chunks move in either
    direction; the properties derive throughput and efficiency on demand.
    """
    start_time: float = field(default_factory=time.time)  # when measurement began
    bytes_sent: int = 0
    bytes_received: int = 0
    chunks_sent: int = 0
    chunks_received: int = 0
    compression_ratio: float = 1.0  # compressed/original; lower is better
    memory_usage: int = 0
    cpu_time: float = 0
    retransmissions: int = 0
    
    @property
    def throughput(self) -> float:
        """Calculate throughput in bytes per second since start_time."""
        duration = time.time() - self.start_time
        if duration > 0:
            return (self.bytes_sent + self.bytes_received) / duration
        # Fixed: return a float (was the int `0`) to honour the annotation.
        return 0.0
    
    @property
    def efficiency(self) -> float:
        """Calculate transfer efficiency in [0.0, 1.0].

        1.0 means no retransmissions.  Fixed: the value is clamped so a
        retransmission count exceeding chunks_sent can no longer yield a
        negative efficiency.
        """
        if self.chunks_sent > 0:
            return max(0.0, 1.0 - (self.retransmissions / self.chunks_sent))
        return 1.0


class FlowController:
    """Sliding-window flow control with TCP-style congestion handling."""
    
    def __init__(self, initial_window: int = 16):
        """Initialize flow controller.
        
        Args:
            initial_window: Initial window size
        """
        # Window sizing limits
        self.window_size = initial_window
        self.min_window = 4
        self.max_window = 64
        self.congestion_threshold = 32
        
        # Current transmission state
        self.state = CongestionState.SLOW_START
        self.in_flight = 0
        self.acked = 0
        
        # Smoothed RTT tracking (Jacobson/Karels coefficients)
        self.rtt_estimate = 0.1  # start at 100ms
        self.rtt_variance = 0.05
        self.rtt_alpha = 0.125
        self.rtt_beta = 0.25
        
        # Congestion bookkeeping
        self.last_congestion_time = 0
        self.congestion_events = 0
        
        logger.info(f"Flow controller initialized with window size {initial_window}")
    
    def can_send(self) -> bool:
        """Return True while the window has room for another packet."""
        return self.in_flight < self.window_size
    
    def on_send(self):
        """Record one packet entering the network."""
        self.in_flight += 1
    
    def on_ack(self, rtt: float):
        """Handle acknowledgment.
        
        Args:
            rtt: Round trip time for this packet
        """
        if self.in_flight > 0:
            self.in_flight -= 1
        self.acked += 1
        self._update_rtt(rtt)
        
        if self.state == CongestionState.SLOW_START:
            # Grow by one per ACK until the threshold is hit
            self.window_size = min(self.window_size + 1, self.max_window)
            if self.window_size >= self.congestion_threshold:
                self.state = CongestionState.CONGESTION_AVOIDANCE
                logger.debug("Entering congestion avoidance")
        elif self.state == CongestionState.CONGESTION_AVOIDANCE:
            # Roughly +1 window per full window of ACKs (linear growth)
            if self.acked >= self.window_size:
                self.window_size = min(self.window_size + 1, self.max_window)
                self.acked = 0
    
    def on_timeout(self):
        """Handle a retransmission timeout: collapse the window."""
        self.congestion_events += 1
        self.last_congestion_time = time.time()
        
        self.state = CongestionState.TIMEOUT
        # Remember half the current window as the new slow-start threshold
        self.congestion_threshold = max(self.window_size // 2, self.min_window)
        self.window_size = self.min_window
        self.in_flight = 0
        
        logger.warning(f"Timeout: window reduced to {self.window_size}")
    
    def on_loss(self):
        """Handle packet loss (fast retransmit)."""
        if self.state == CongestionState.FAST_RECOVERY:
            return  # already recovering; don't shrink the window twice
        
        self.congestion_events += 1
        self.state = CongestionState.FAST_RECOVERY
        self.congestion_threshold = max(self.window_size // 2, self.min_window)
        self.window_size = self.congestion_threshold
        
        logger.debug(f"Fast recovery: window reduced to {self.window_size}")
    
    def _update_rtt(self, measured_rtt: float):
        """Update RTT estimates.
        
        Args:
            measured_rtt: Measured round trip time
        """
        if self.rtt_estimate == 0:
            # First sample (only possible if the estimate was reset externally)
            self.rtt_estimate = measured_rtt
            self.rtt_variance = measured_rtt / 2
            return
        
        # Jacobson/Karels exponential smoothing
        delta = measured_rtt - self.rtt_estimate
        self.rtt_estimate += self.rtt_alpha * delta
        self.rtt_variance += self.rtt_beta * (abs(delta) - self.rtt_variance)
    
    def get_timeout(self) -> float:
        """Return the retransmission timeout (RTO = SRTT + 4 * RTTVAR).
        
        Returns:
            Timeout in seconds
        """
        return self.rtt_estimate + 4 * self.rtt_variance
    
    def get_stats(self) -> Dict[str, Any]:
        """Get flow control statistics.
        
        Returns:
            Statistics dictionary
        """
        return {
            "window_size": self.window_size,
            "in_flight": self.in_flight,
            "state": self.state.name,
            "rtt_estimate": self.rtt_estimate,
            "congestion_events": self.congestion_events,
            "congestion_threshold": self.congestion_threshold
        }


class BufferManager:
    """Manages memory buffers for data transfer.

    Provides a fixed-size ring buffer for streaming data plus an LRU
    cache of chunk payloads, both accounted against a single memory
    budget (``max_memory``).
    """
    
    def __init__(self, max_memory: int = 100 * 1024 * 1024):  # 100MB default
        """Initialize buffer manager.
        
        Args:
            max_memory: Maximum memory to use in bytes
        """
        self.max_memory = max_memory
        self.current_usage = 0  # bytes accounted across ring buffer + cache
        
        # Ring buffer for streaming data
        self.ring_buffer = bytearray(RING_BUFFER_SIZE)
        self.read_pos = 0
        self.write_pos = 0
        self.buffer_size = 0  # bytes currently stored in the ring
        
        # LRU cache for chunks (least recently used at the front)
        self.chunk_cache: OrderedDict[int, bytes] = OrderedDict()
        self.cache_size = 0
        
        # Invoked when a rejected write finds the ring above HIGH_WATER_MARK
        self.pressure_callback: Optional[Callable] = None
        
        logger.info(f"Buffer manager initialized with {max_memory / 1024 / 1024:.1f}MB limit")
    
    def write_to_ring(self, data: bytes) -> bool:
        """Write data to ring buffer.
        
        Args:
            data: Data to write
            
        Returns:
            True if successful, False if buffer full
        """
        data_len = len(data)
        
        # Reject writes that would overflow the ring
        if self.buffer_size + data_len > RING_BUFFER_SIZE:
            if self.buffer_size / RING_BUFFER_SIZE > HIGH_WATER_MARK:
                logger.warning("Ring buffer high water mark reached")
                if self.pressure_callback:
                    self.pressure_callback()
            return False
        
        # Fixed: copy via slice assignment (at most two bulk copies for
        # wraparound) instead of the previous per-byte Python loop.
        end = self.write_pos + data_len
        if end <= RING_BUFFER_SIZE:
            self.ring_buffer[self.write_pos:end] = data
        else:
            split = RING_BUFFER_SIZE - self.write_pos
            self.ring_buffer[self.write_pos:] = data[:split]
            self.ring_buffer[:data_len - split] = data[split:]
        self.write_pos = end % RING_BUFFER_SIZE
        
        self.buffer_size += data_len
        self.current_usage += data_len
        
        return True
    
    def read_from_ring(self, size: int) -> Optional[bytes]:
        """Read data from ring buffer.
        
        Args:
            size: Number of bytes to read
            
        Returns:
            Data bytes or None if not enough data
        """
        if self.buffer_size < size:
            return None
        
        # Fixed: slice the data out (handling wraparound) instead of the
        # previous per-byte Python loop.
        end = self.read_pos + size
        if end <= RING_BUFFER_SIZE:
            data = bytes(self.ring_buffer[self.read_pos:end])
        else:
            split = RING_BUFFER_SIZE - self.read_pos
            data = bytes(self.ring_buffer[self.read_pos:]) + \
                   bytes(self.ring_buffer[:size - split])
        self.read_pos = end % RING_BUFFER_SIZE
        
        self.buffer_size -= size
        self.current_usage -= size
        
        # Check low water mark
        if self.buffer_size / RING_BUFFER_SIZE < LOW_WATER_MARK:
            logger.debug("Ring buffer low water mark reached")
        
        return data
    
    def cache_chunk(self, chunk_id: int, data: bytes) -> bool:
        """Cache a data chunk, replacing any existing entry.
        
        Args:
            chunk_id: Chunk identifier
            data: Chunk data
            
        Returns:
            True if cached successfully
        """
        data_size = len(data)
        
        # Fixed: drop any stale entry first so re-caching a chunk_id stores
        # the new payload (previously the old data was kept and only moved
        # to the MRU position) and size accounting stays correct.
        old = self.chunk_cache.pop(chunk_id, None)
        if old is not None:
            self.cache_size -= len(old)
            self.current_usage -= len(old)
        
        # Enforce the memory budget, evicting LRU entries if needed
        if self.current_usage + data_size > self.max_memory:
            self._evict_lru_chunks(data_size)
            
            if self.current_usage + data_size > self.max_memory:
                logger.warning("Cannot cache chunk: memory limit exceeded")
                return False
        
        # Insert at the MRU end
        self.chunk_cache[chunk_id] = data
        self.cache_size += data_size
        self.current_usage += data_size
        
        return True
    
    def get_cached_chunk(self, chunk_id: int) -> Optional[bytes]:
        """Get cached chunk.
        
        Args:
            chunk_id: Chunk identifier
            
        Returns:
            Chunk data or None if not cached
        """
        if chunk_id in self.chunk_cache:
            # Refresh LRU position (most recently used at the end)
            self.chunk_cache.move_to_end(chunk_id)
            return self.chunk_cache[chunk_id]
        return None
    
    def _evict_lru_chunks(self, required_space: int):
        """Evict least recently used chunks.
        
        Args:
            required_space: Space needed in bytes
        """
        freed_space = 0
        chunks_to_remove = []
        
        # OrderedDict iterates oldest-first, so this walks LRU -> MRU
        for chunk_id, data in self.chunk_cache.items():
            if freed_space >= required_space:
                break
            chunks_to_remove.append(chunk_id)
            freed_space += len(data)
        
        for chunk_id in chunks_to_remove:
            data = self.chunk_cache.pop(chunk_id)
            self.cache_size -= len(data)
            self.current_usage -= len(data)
        
        if chunks_to_remove:
            logger.debug(f"Evicted {len(chunks_to_remove)} chunks to free {freed_space} bytes")
    
    def clear(self):
        """Clear all buffers and reset accounting."""
        self.ring_buffer = bytearray(RING_BUFFER_SIZE)
        self.read_pos = 0
        self.write_pos = 0
        self.buffer_size = 0
        
        self.chunk_cache.clear()
        self.cache_size = 0
        self.current_usage = 0
        
        # Encourage prompt reclamation of the dropped payloads
        gc.collect()
        
        logger.info("Buffers cleared")
    
    def get_stats(self) -> Dict[str, Any]:
        """Get buffer statistics.
        
        Returns:
            Statistics dictionary
        """
        return {
            "current_usage": self.current_usage,
            "max_memory": self.max_memory,
            "usage_percent": (self.current_usage / self.max_memory * 100) if self.max_memory > 0 else 0,
            "ring_buffer_size": self.buffer_size,
            "ring_buffer_percent": (self.buffer_size / RING_BUFFER_SIZE * 100),
            "cached_chunks": len(self.chunk_cache),
            "cache_size": self.cache_size
        }


class TransferOptimizer:
    """Optimizes data transfer with compression and scheduling."""
    
    def __init__(self, compression_enabled: bool = True):
        """Initialize transfer optimizer.
        
        Args:
            compression_enabled: Enable compression
        """
        self.compression_enabled = compression_enabled
        
        # Running totals used to derive the overall compression ratio
        self.compression_stats = {
            "compressed_chunks": 0,
            "original_size": 0,
            "compressed_size": 0
        }
        
        # One FIFO queue per priority level
        self.transfer_queue: Dict[TransferPriority, deque] = {
            priority: deque() for priority in TransferPriority
        }
        
        logger.info(f"Transfer optimizer initialized (compression: {compression_enabled})")
    
    def compress_chunk(self, data: bytes) -> Tuple[bytes, bool]:
        """Compress data chunk if beneficial.
        
        Args:
            data: Data to compress
            
        Returns:
            Tuple of (data, compressed_flag)
        """
        # Skip small chunks and honour the enable flag
        if not self.compression_enabled or len(data) < COMPRESSION_THRESHOLD:
            return data, False
        
        try:
            candidate = zlib.compress(data, COMPRESSION_LEVEL)
        except Exception as e:
            logger.error(f"Compression failed: {e}")
            return data, False
        
        # Only keep the compressed form when it saves at least 10%
        if len(candidate) / len(data) < 0.9:
            stats = self.compression_stats
            stats["compressed_chunks"] += 1
            stats["original_size"] += len(data)
            stats["compressed_size"] += len(candidate)
            return candidate, True
        
        return data, False
    
    def decompress_chunk(self, data: bytes) -> bytes:
        """Decompress data chunk.
        
        On failure the input is returned unchanged (best-effort; the
        caller's checksum verification catches corruption).
        
        Args:
            data: Compressed data
            
        Returns:
            Decompressed data
        """
        try:
            return zlib.decompress(data)
        except Exception as e:
            logger.error(f"Decompression failed: {e}")
            return data
    
    def schedule_transfer(self, session: TransferSession):
        """Schedule transfer based on priority.
        
        Args:
            session: Transfer session to schedule
        """
        self.transfer_queue[session.priority].append(session)
    
    def get_next_transfer(self) -> Optional[TransferSession]:
        """Get next transfer to process.
        
        Returns:
            Next transfer session or None
        """
        # Drain higher-priority queues before lower ones
        for queue in (self.transfer_queue[TransferPriority.HIGH],
                      self.transfer_queue[TransferPriority.NORMAL],
                      self.transfer_queue[TransferPriority.LOW]):
            if queue:
                return queue.popleft()
        return None
    
    def get_compression_ratio(self) -> float:
        """Get overall compression ratio.
        
        Returns:
            Compression ratio (0-1, lower is better)
        """
        original = self.compression_stats["original_size"]
        if original > 0:
            return self.compression_stats["compressed_size"] / original
        return 1.0
    
    def get_stats(self) -> Dict[str, Any]:
        """Get optimizer statistics.
        
        Returns:
            Statistics dictionary
        """
        stats = self.compression_stats
        return {
            "compression_enabled": self.compression_enabled,
            "compressed_chunks": stats["compressed_chunks"],
            "compression_ratio": self.get_compression_ratio(),
            "bytes_saved": stats["original_size"] - stats["compressed_size"],
            "queue_high": len(self.transfer_queue[TransferPriority.HIGH]),
            "queue_normal": len(self.transfer_queue[TransferPriority.NORMAL]),
            "queue_low": len(self.transfer_queue[TransferPriority.LOW])
        }


class LargeDataHandler:
    """Main handler for large data transfers over BLE.

    Combines flow control (sliding window), buffer management (ring
    buffer + LRU chunk cache) and transfer optimization (compression,
    priority queues) on top of the underlying fragmentation protocol.
    """
    
    def __init__(self, fragmentation_protocol: FragmentationProtocol,
                 max_memory: int = 100 * 1024 * 1024):
        """Initialize large data handler.
        
        Args:
            fragmentation_protocol: Underlying fragmentation protocol
            max_memory: Maximum memory usage in bytes
        """
        self.protocol = fragmentation_protocol
        self.flow_controller = FlowController()
        self.buffer_manager = BufferManager(max_memory)
        self.optimizer = TransferOptimizer()
        
        # Active sessions, keyed by session_id
        self.sessions: Dict[str, TransferSession] = {}
        self.current_session: Optional[TransferSession] = None
        
        # Aggregate performance metrics across all transfers
        self.metrics = PerformanceMetrics()
        
        # progress(session_id, percent) / completion(session_id, success)
        self.progress_callback: Optional[Callable[[str, float], None]] = None
        self.completion_callback: Optional[Callable[[str, bool], None]] = None
        
        # Shrink the flow-control window when buffers fill up
        self.buffer_manager.pressure_callback = self._on_buffer_pressure
        
        logger.info("Large data handler initialized")
    
    def create_chunks(self, data: bytes) -> List[ChunkMetadata]:
        """Split data into chunks, compress them, and cache the payloads.
        
        The checksum is computed over the *uncompressed* chunk so the
        receiver can verify integrity after decompression.
        
        Args:
            data: Data to chunk
            
        Returns:
            List of chunk metadata; payloads are stored in the buffer
            manager's chunk cache keyed by chunk_id
        """
        chunks = []
        offset = 0
        chunk_id = 0
        
        while offset < len(data):
            chunk_size = min(CHUNK_SIZE, len(data) - offset)
            chunk_data = data[offset:offset + chunk_size]
            
            # Truncated SHA-256 over the raw (uncompressed) chunk
            checksum = hashlib.sha256(chunk_data).hexdigest()[:16]
            
            # Compress only when the optimizer deems it beneficial
            compressed_data, is_compressed = self.optimizer.compress_chunk(chunk_data)
            
            metadata = ChunkMetadata(
                chunk_id=chunk_id,
                offset=offset,
                size=len(compressed_data),
                checksum=checksum,
                compressed=is_compressed,
                original_size=chunk_size
            )
            chunks.append(metadata)
            
            # Cache the on-the-wire payload for sending
            self.buffer_manager.cache_chunk(chunk_id, compressed_data)
            
            offset += chunk_size
            chunk_id += 1
        
        logger.debug(f"Created {len(chunks)} chunks from {len(data)} bytes")
        return chunks
    
    async def send_data(self, data: bytes, session_id: str,
                       priority: TransferPriority = TransferPriority.NORMAL) -> bool:
        """Send large data.
        
        Registers a session, chunks the payload, and sends each chunk
        through the fragmentation protocol under flow control.
        
        Args:
            data: Data to send
            session_id: Session identifier
            priority: Transfer priority
            
        Returns:
            True if successful
        """
        try:
            # Register the session for this transfer
            session = TransferSession(
                session_id=session_id,
                file_path=None,
                total_size=len(data),
                priority=priority,
                compression_enabled=self.optimizer.compression_enabled
            )
            
            chunks = self.create_chunks(data)
            session.chunks_total = len(chunks)
            
            self.sessions[session_id] = session
            self.current_session = session
            
            # Send chunks under the sliding-window flow control
            for chunk_meta in chunks:
                # Busy-wait (with small sleeps) until the window opens
                while not self.flow_controller.can_send():
                    await asyncio.sleep(0.01)
                
                # Fixed: test for None explicitly; `not chunk_data` would
                # also reject a (hypothetical) empty-but-present payload
                chunk_data = self.buffer_manager.get_cached_chunk(chunk_meta.chunk_id)
                if chunk_data is None:
                    logger.error(f"Chunk {chunk_meta.chunk_id} not in cache")
                    return False
                
                # Send through fragmentation protocol, timing the RTT
                self.flow_controller.on_send()
                start_time = time.time()
                
                success = await self.protocol.send(chunk_data)
                
                if success:
                    rtt = time.time() - start_time
                    self.flow_controller.on_ack(rtt)
                    
                    session.chunks_completed.append(chunk_meta.chunk_id)
                    session.transferred += chunk_meta.original_size
                    
                    self.metrics.chunks_sent += 1
                    self.metrics.bytes_sent += chunk_meta.original_size
                    
                    # Report progress
                    if self.progress_callback:
                        self.progress_callback(session_id, session.progress)
                else:
                    self.flow_controller.on_timeout()
                    logger.error(f"Failed to send chunk {chunk_meta.chunk_id}")
                    return False
            
            # Mark complete
            if self.completion_callback:
                self.completion_callback(session_id, True)
            
            logger.info(f"Transfer {session_id} complete: {len(data)} bytes in {len(chunks)} chunks")
            return True
            
        except Exception as e:
            logger.error(f"Send error: {e}")
            if self.completion_callback:
                self.completion_callback(session_id, False)
            return False
    
    async def send_file(self, file_path: str, session_id: str,
                       priority: TransferPriority = TransferPriority.NORMAL,
                       resume_session: Optional[str] = None) -> bool:
        """Send file with optional resume.
        
        Args:
            file_path: Path to file
            session_id: Session identifier
            priority: Transfer priority
            resume_session: Previous session to resume
            
        Returns:
            True if successful
        """
        try:
            # Determine the resume offset from a previous session, if any
            start_offset = 0
            if resume_session and resume_session in self.sessions:
                old_session = self.sessions[resume_session]
                start_offset = old_session.transferred
                logger.info(f"Resuming transfer from offset {start_offset}")
            
            file_size = os.path.getsize(file_path)
            
            # Read the remaining portion of the file
            with open(file_path, 'rb') as f:
                f.seek(start_offset)
                data = f.read()
            
            # Delegate to send_data, which registers the session.
            # Fixed: previously a second TransferSession was constructed
            # here and silently discarded, losing file_path and the
            # resume-offset bookkeeping.
            success = await self.send_data(data, session_id, priority)
            
            # Patch file-level bookkeeping onto the registered session so
            # it reflects the whole file rather than just this run's slice.
            session = self.sessions.get(session_id)
            if session is not None:
                session.file_path = file_path
                session.total_size = file_size
                session.transferred += start_offset
            
            return success
            
        except Exception as e:
            logger.error(f"File send error: {e}")
            return False
    
    def receive_chunk(self, chunk_data: bytes, chunk_meta: ChunkMetadata) -> bool:
        """Receive and process chunk.
        
        Decompresses (if flagged), verifies the checksum against the
        uncompressed payload, and caches the chunk.
        
        Args:
            chunk_data: Chunk data
            chunk_meta: Chunk metadata
            
        Returns:
            True if processed successfully
        """
        try:
            # Decompress if needed
            if chunk_meta.compressed:
                chunk_data = self.optimizer.decompress_chunk(chunk_data)
            
            # Verify checksum (computed over uncompressed data at send time)
            checksum = hashlib.sha256(chunk_data).hexdigest()[:16]
            if checksum != chunk_meta.checksum:
                logger.error(f"Checksum mismatch for chunk {chunk_meta.chunk_id}")
                return False
            
            # Cache chunk for later reassembly
            self.buffer_manager.cache_chunk(chunk_meta.chunk_id, chunk_data)
            
            # Update metrics
            self.metrics.chunks_received += 1
            self.metrics.bytes_received += len(chunk_data)
            
            return True
            
        except Exception as e:
            logger.error(f"Receive chunk error: {e}")
            return False
    
    def get_session(self, session_id: str) -> Optional[TransferSession]:
        """Get transfer session.
        
        Args:
            session_id: Session identifier
            
        Returns:
            Transfer session or None
        """
        return self.sessions.get(session_id)
    
    def cancel_transfer(self, session_id: str) -> bool:
        """Cancel active transfer.
        
        Args:
            session_id: Session to cancel
            
        Returns:
            True if cancelled
        """
        if session_id in self.sessions:
            del self.sessions[session_id]
            # Abort the underlying protocol only for the in-progress session
            if self.current_session and self.current_session.session_id == session_id:
                self.current_session = None
                self.protocol.cancel()
            
            logger.info(f"Transfer {session_id} cancelled")
            return True
        return False
    
    def _on_buffer_pressure(self):
        """Handle buffer pressure by halving the flow-control window."""
        logger.warning("Buffer pressure detected, throttling transfer")
        self.flow_controller.window_size = max(
            self.flow_controller.min_window,
            self.flow_controller.window_size // 2
        )
    
    def get_stats(self) -> Dict[str, Any]:
        """Get handler statistics.
        
        Returns:
            Complete statistics
        """
        return {
            "metrics": {
                "throughput": self.metrics.throughput,
                "efficiency": self.metrics.efficiency,
                "bytes_sent": self.metrics.bytes_sent,
                "bytes_received": self.metrics.bytes_received,
                "chunks_sent": self.metrics.chunks_sent,
                "chunks_received": self.metrics.chunks_received
            },
            "flow_control": self.flow_controller.get_stats(),
            "buffer": self.buffer_manager.get_stats(),
            "optimizer": self.optimizer.get_stats(),
            "sessions": {
                "active": len(self.sessions),
                "current": self.current_session.session_id if self.current_session else None
            }
        }
    
    async def cleanup(self):
        """Clean up resources (buffers and session registry)."""
        try:
            # Clear buffers
            self.buffer_manager.clear()
            
            # Clear sessions
            self.sessions.clear()
            self.current_session = None
            
            logger.info("Large data handler cleaned up")
            
        except Exception as e:
            logger.error(f"Cleanup error: {e}")


# Example usage
if __name__ == "__main__":
    import asyncio
    import uuid
    
    async def test_large_data():
        """Exercise the large data handler end to end with a 100KB payload."""
        handler = LargeDataHandler(FragmentationProtocol())
        
        # Wire up progress/completion reporting
        def report_progress(session_id: str, progress: float):
            print(f"Transfer {session_id}: {progress:.1f}%")
        
        def report_completion(session_id: str, success: bool):
            print(f"Transfer {session_id} {'completed' if success else 'failed'}")
        
        handler.progress_callback = report_progress
        handler.completion_callback = report_completion
        
        payload = b"X" * (100 * 1024)  # 100KB of test data
        transfer_id = str(uuid.uuid4())
        
        print(f"Sending {len(payload)} bytes...")
        ok = await handler.send_data(payload, transfer_id, TransferPriority.NORMAL)
        print(f"Send {'successful' if ok else 'failed'}")
        
        # Dump summary statistics
        stats = handler.get_stats()
        print(f"\nStatistics:")
        print(f"  Throughput: {stats['metrics']['throughput']:.2f} B/s")
        print(f"  Efficiency: {stats['metrics']['efficiency']:.2%}")
        print(f"  Compression ratio: {stats['optimizer']['compression_ratio']:.2%}")
        print(f"  Memory usage: {stats['buffer']['usage_percent']:.1f}%")
        
        await handler.cleanup()
    
    asyncio.run(test_large_data())