#!/usr/bin/env python3

import os
import threading
import time
import cv2
import numpy as np
from ultralytics import YOLO
from logger import setup_logger
from collections import deque
import weakref
from concurrent.futures import ThreadPoolExecutor, Future
import asyncio
from typing import Optional, Tuple, List, Any
import multiprocessing as mp
from queue import Queue, Empty, Full
import gc

logger = setup_logger()

# Optimized configuration
SEG_MODEL_PATH = "./merge_seg_rknn_model"
POSE_MODEL_PATH = "./merge_pose_rknn_model"
SEG_CORE = 0
POSE_CORE = 1

# Performance-focused settings
BATCH_SIZE = 8  # Larger batch for better NPU utilization
PREFETCH_SIZE = 16  # Prefetch more frames
MAX_CONCURRENT_TASKS = 32  # Higher concurrency
RESULT_TIMEOUT = 0.1  # Aggressive timeout (100ms)

class StreamingNPUWorker:
    """Streaming NPU worker optimized for maximum throughput.

    One dedicated thread per worker drains tasks from a bounded buffer,
    groups them into batches, runs YOLO inference pinned to a single NPU
    core, and publishes results for callers to poll via ``get_result()``.
    """

    def __init__(self, core_id: int, model_path: str, task_type: str):
        """
        Args:
            core_id: NPU core index this worker is pinned to.
            model_path: Path to the RKNN model.
            task_type: ``"segment"`` or ``"pose"``; selects the YOLO task.
        """
        self.core_id = core_id
        self.task_type = task_type
        self.model_path = model_path

        # Pre-allocated containers for batch processing
        self.batch_images: List[np.ndarray] = []
        self.batch_futures: List[Future] = []
        self.batch_ready = threading.Event()

        # Bounded task/result buffers. All inspection and mutation happen
        # under buffer_lock because we iterate the deques, not just
        # append/pop at the ends.
        self.task_buffer: deque = deque(maxlen=MAX_CONCURRENT_TASKS)
        self.result_buffer: deque = deque(maxlen=MAX_CONCURRENT_TASKS)
        self.buffer_lock = threading.RLock()

        # Single dedicated thread for this worker
        self.worker_thread: Optional[threading.Thread] = None
        self.running = False
        self.model = None
        self.model_ready = threading.Event()

        # Minimal throughput stats (written only by the worker thread)
        self.processed_count = 0
        self.last_fps_time = time.time()
        self.current_fps = 0.0

        self._start_worker()

    def _start_worker(self):
        """Start the dedicated worker thread (daemon, named per core/task)."""
        self.running = True
        self.worker_thread = threading.Thread(
            target=self._worker_loop,
            name=f"NPU-{self.core_id}-{self.task_type}",
            daemon=True
        )
        self.worker_thread.start()

    def _load_model(self) -> bool:
        """Load and warm up the model.

        Returns:
            True on success; False (after logging) on any failure.
        """
        try:
            # Pin the RKNN runtime to this worker's NPU core via the
            # core-mask environment variable.
            os.environ['RKNN_SERVER_CORE_MASK'] = str(1 << self.core_id)

            if self.task_type == "segment":
                self.model = YOLO(self.model_path, task='segment')
            else:
                self.model = YOLO(self.model_path, task='pose')

            # Warm up with dummy data so the first real frame does not
            # pay for lazy initialization.
            dummy_img = np.zeros((640, 640, 3), dtype=np.uint8)
            _ = self.model.predict(dummy_img, save=False, verbose=False)

            self.model_ready.set()
            logger.info(f"NPU Core {self.core_id} ({self.task_type}) ready")
            return True

        except Exception as e:
            logger.error(f"Model loading failed for core {self.core_id}: {e}")
            return False

    def _worker_loop(self):
        """Main loop: collect pending tasks into batches and process them."""
        if not self._load_model():
            return

        batch_buffer: List[dict] = []
        last_process_time = time.time()
        batch_timeout = 0.005  # flush a partial batch after 5ms

        while self.running:
            current_time = time.time()

            # Drain pending tasks into the batch (up to BATCH_SIZE).
            with self.buffer_lock:
                while len(batch_buffer) < BATCH_SIZE and self.task_buffer:
                    batch_buffer.append(self.task_buffer.popleft())

            # Process when the batch is full, or when a partial batch has
            # been waiting longer than batch_timeout.
            should_process = (
                len(batch_buffer) >= BATCH_SIZE or
                (len(batch_buffer) > 0 and (current_time - last_process_time) > batch_timeout)
            )

            if should_process and batch_buffer:
                # BUGFIX: record the batch size BEFORE clearing. The old
                # code added len(batch_buffer) after clear(), so the FPS
                # counter always incremented by zero.
                batch_size = len(batch_buffer)
                self._process_batch_streaming(batch_buffer)
                batch_buffer.clear()
                last_process_time = current_time

                # Update FPS once per second.
                self.processed_count += batch_size
                if current_time - self.last_fps_time >= 1.0:
                    self.current_fps = self.processed_count / (current_time - self.last_fps_time)
                    self.processed_count = 0
                    self.last_fps_time = current_time

            # Minimal sleep to avoid busy waiting when idle.
            if not batch_buffer:
                time.sleep(0.0001)

    def _process_batch_streaming(self, tasks: List[dict]):
        """Run inference for each task in *tasks* and publish the results."""
        if not tasks:
            return

        try:
            # Images are fed to YOLO individually but in rapid succession:
            # keeps compatibility with the predict() API while keeping the
            # NPU saturated.
            results = []
            for task in tasks:
                img = task['img']
                start_time = time.time()

                result = self.model.predict(
                    img,
                    save=False,
                    verbose=False,
                    device=f'npu:{self.core_id}'
                )

                inference_time = time.time() - start_time
                processed_result = self._extract_result(result[0])

                results.append({
                    'task_id': task['task_id'],
                    'result': processed_result,
                    'inference_time': inference_time,
                    'success': processed_result is not None
                })

            # Publish results. deque(maxlen=...) evicts the OLDEST entry
            # when full, so the freshest results always get in — consistent
            # with the error path below. (Previously new results were
            # silently dropped when the buffer was full, starving waiters.)
            with self.buffer_lock:
                for result in results:
                    self.result_buffer.append(result)

        except Exception as e:
            logger.error(f"Batch processing error on core {self.core_id}: {e}")
            # Publish error results so waiters fail fast instead of
            # timing out.
            with self.buffer_lock:
                for task in tasks:
                    self.result_buffer.append({
                        'task_id': task['task_id'],
                        'result': None,
                        'inference_time': 0,
                        'success': False,
                        'error': str(e)
                    })

    def _extract_result(self, result) -> Optional[Any]:
        """Pull the task-relevant payload out of a YOLO result object.

        Returns:
            For "segment": the first mask polygon (``masks.xy[0]``) or None.
            For "pose": the keypoints object or None.
        """
        try:
            if self.task_type == "segment":
                if hasattr(result, 'masks') and result.masks is not None:
                    masks_xy = result.masks.xy
                    return masks_xy[0] if len(masks_xy) > 0 else None
            else:  # pose
                if hasattr(result, 'keypoints') and result.keypoints is not None:
                    return result.keypoints
            return None
        except Exception:
            # Narrowed from a bare except: do not swallow SystemExit /
            # KeyboardInterrupt; malformed results still yield None.
            return None

    def submit_task(self, img: np.ndarray, task_id: int) -> bool:
        """Queue an image for inference.

        Returns:
            True if the task was accepted, False if the buffer is full.
        """
        task = {
            'img': img,
            'task_id': task_id,
            'submit_time': time.time()
        }

        with self.buffer_lock:
            if len(self.task_buffer) < self.task_buffer.maxlen:
                self.task_buffer.append(task)
                return True
            return False

    def get_result(self, task_id: int, timeout: float = RESULT_TIMEOUT) -> Optional[dict]:
        """Poll for the result of a specific task.

        Returns:
            The matching result dict (removed from the buffer), or None
            if *timeout* elapses first.
        """
        deadline = time.time() + timeout

        while time.time() < deadline:
            with self.buffer_lock:
                for i, result in enumerate(self.result_buffer):
                    if result['task_id'] == task_id:
                        # BUGFIX: remove the MATCHED entry. The old code
                        # called popleft(), which returned whatever entry
                        # was oldest — the wrong task's result whenever
                        # the match was not at the front of the deque.
                        del self.result_buffer[i]
                        return result

            time.sleep(0.0001)  # minimal sleep between polls

        return None  # timed out

    def wait_ready(self, timeout: float = 10.0) -> bool:
        """Block until the model has loaded, or *timeout* seconds pass."""
        return self.model_ready.wait(timeout)

    def stop(self):
        """Stop the worker thread (joined with a 2s grace period)."""
        self.running = False
        if self.worker_thread and self.worker_thread.is_alive():
            self.worker_thread.join(timeout=2.0)


class UltraFastSegPose:
    """Ultra-high performance SegPose with streaming pipeline.

    Fans each frame out to a segmentation worker and a pose worker in
    parallel, then joins both results. Also offers an async API and an
    optional background prefetcher.
    """

    def __init__(self):
        """Create both NPU workers and wait until their models are ready.

        Raises:
            RuntimeError: if either worker fails to initialize within 15s.
        """
        logger.info("Initializing UltraFast SegPose...")

        # Create workers (each spawns its own inference thread).
        self.seg_worker = StreamingNPUWorker(SEG_CORE, SEG_MODEL_PATH, "segment")
        self.pose_worker = StreamingNPUWorker(POSE_CORE, POSE_MODEL_PATH, "pose")

        # Wait for workers to be ready
        if not self.seg_worker.wait_ready(15):
            raise RuntimeError("Segmentation worker failed to initialize")
        if not self.pose_worker.wait_ready(15):
            raise RuntimeError("Pose worker failed to initialize")

        # Task management
        self._task_counter = 0
        self._counter_lock = threading.Lock()

        # Performance tracking
        self._total_requests = 0
        self._successful_requests = 0
        self._start_time = time.time()
        # Timestamps of recent successes, used for a sliding-window FPS.
        self._fps_window: deque = deque(maxlen=50)

        # Shared executor for get_result_async(), created lazily so its
        # threads only exist if the async API is used.
        # BUGFIX: the old code built a NEW ThreadPoolExecutor on every
        # get_result_async() call and never shut it down, leaking one
        # executor + thread per request.
        self._async_executor: Optional[ThreadPoolExecutor] = None
        self._executor_lock = threading.Lock()

        # Prefetching support
        self._prefetch_queue = Queue(maxsize=PREFETCH_SIZE)
        self._prefetch_thread = None
        self._enable_prefetch = False

        logger.info("UltraFast SegPose initialized successfully")

    def _get_next_task_id(self) -> int:
        """Return a process-unique, monotonically increasing task ID."""
        with self._counter_lock:
            self._task_counter += 1
            return self._task_counter

    def get_result(self, img: np.ndarray) -> Tuple[Optional[Any], Optional[Any]]:
        """Run segmentation and pose inference on *img* in parallel.

        Returns:
            (seg_data, pose_data); either element is None on submission
            failure, inference failure, or timeout.
        """
        task_id = self._get_next_task_id()

        # Submit the same frame to both workers simultaneously.
        seg_submitted = self.seg_worker.submit_task(img, task_id)
        pose_submitted = self.pose_worker.submit_task(img, task_id)

        if not (seg_submitted and pose_submitted):
            logger.warning(f"Task submission failed: seg={seg_submitted}, pose={pose_submitted}")
            return None, None

        # Collect results with aggressive timeout.
        seg_result = self.seg_worker.get_result(task_id, RESULT_TIMEOUT)
        pose_result = self.pose_worker.get_result(task_id, RESULT_TIMEOUT)

        # Update stats: a request counts as successful only when BOTH
        # workers returned a successful result.
        self._total_requests += 1
        if seg_result and pose_result and seg_result['success'] and pose_result['success']:
            self._successful_requests += 1
            self._fps_window.append(time.time())

        # Unwrap the payloads.
        seg_data = seg_result['result'] if seg_result and seg_result['success'] else None
        pose_data = pose_result['result'] if pose_result and pose_result['success'] else None

        return seg_data, pose_data

    def get_result_async(self, img: np.ndarray) -> Future:
        """Submit get_result() to a shared background executor.

        Returns:
            A Future resolving to the (seg_data, pose_data) tuple.
        """
        if self._async_executor is None:
            with self._executor_lock:
                # Double-checked under the lock so only one executor is
                # ever created.
                if self._async_executor is None:
                    self._async_executor = ThreadPoolExecutor(
                        max_workers=2, thread_name_prefix="segpose-async"
                    )
        return self._async_executor.submit(self.get_result, img)

    def enable_prefetch(self, image_provider):
        """Start a background thread that prefetches frames.

        Args:
            image_provider: object with a ``get_next_image()`` method
                returning the next frame or None when nothing is ready.
        """
        self._enable_prefetch = True
        self._prefetch_thread = threading.Thread(
            target=self._prefetch_loop,
            args=(image_provider,),
            daemon=True
        )
        self._prefetch_thread.start()

    def _prefetch_loop(self, image_provider):
        """Prefetch images in the background until disabled or errored."""
        while self._enable_prefetch:
            try:
                image_data = image_provider.get_next_image()
                if image_data is not None:
                    self._prefetch_queue.put(image_data, timeout=0.1)
                else:
                    time.sleep(0.01)  # provider idle; back off briefly
            except Full:
                time.sleep(0.01)  # queue full; let consumers catch up
            except Exception as e:
                logger.error(f"Prefetch error: {e}")
                break

    def get_prefetched_result(self) -> Tuple[Any, Optional[Any], Optional[Any]]:
        """Pop a prefetched frame and run inference on it.

        Returns:
            (image_data, seg_data, pose_data), or (None, None, None) if
            no frame is available within 100ms.
        """
        try:
            image_data = self._prefetch_queue.get(timeout=0.1)
            seg_result, pose_result = self.get_result(image_data.color_img)
            return image_data, seg_result, pose_result
        except Empty:
            return None, None, None

    def get_stats(self) -> dict:
        """Return a snapshot of performance counters and FPS estimates."""
        current_time = time.time()
        uptime = current_time - self._start_time

        # Sliding-window FPS from the timestamps of recent successes.
        current_fps = 0.0
        if len(self._fps_window) >= 2:
            time_span = self._fps_window[-1] - self._fps_window[0]
            if time_span > 0:
                current_fps = (len(self._fps_window) - 1) / time_span

        return {
            'total_requests': self._total_requests,
            'successful_requests': self._successful_requests,
            'success_rate': self._successful_requests / max(1, self._total_requests),
            'uptime': uptime,
            'avg_fps': self._successful_requests / max(1, uptime),
            'current_fps': current_fps,
            'seg_worker_fps': self.seg_worker.current_fps,
            'pose_worker_fps': self.pose_worker.current_fps
        }

    def shutdown(self):
        """Stop prefetching, the async executor, and both workers."""
        logger.info("Shutting down UltraFast SegPose...")

        # Stop prefetching
        self._enable_prefetch = False
        if self._prefetch_thread:
            self._prefetch_thread.join(timeout=1.0)

        # Stop the shared async executor, if it was ever created.
        if self._async_executor is not None:
            self._async_executor.shutdown(wait=False)

        # Print final stats
        stats = self.get_stats()
        logger.info(f"Final Stats - Total: {stats['total_requests']}, "
                   f"Success Rate: {stats['success_rate']*100:.1f}%, "
                   f"Avg FPS: {stats['avg_fps']:.2f}")

        # Stop workers
        self.seg_worker.stop()
        self.pose_worker.stop()

        # Force garbage collection to release model/frame memory promptly.
        gc.collect()

        logger.info("UltraFast SegPose shutdown complete")


# Backward compatibility
class SegPose(UltraFastSegPose):
    """Alias kept so existing callers importing ``SegPose`` keep working."""


class HighPerformanceSegPose(UltraFastSegPose):
    """Alias kept so callers importing ``HighPerformanceSegPose`` keep working."""


# Performance testing function
def benchmark_segpose(test_dir: str = "./data/Color1", rounds: int = 3):
    """Benchmark the SegPose implementation.

    Args:
        test_dir: Directory containing test images (.png/.jpg/.jpeg).
        rounds: Number of full passes over the loaded image set.
    """
    logger.info("Starting SegPose benchmark...")

    # Validate the image set BEFORE spinning up NPU workers: previously
    # a bad test_dir made os.listdir raise AFTER initialization, leaving
    # both workers running with no shutdown.
    if not os.path.isdir(test_dir):
        logger.error(f"Test directory not found: {test_dir}")
        return

    image_files = [f for f in os.listdir(test_dir)
                   if f.lower().endswith(('.png', '.jpg', '.jpeg'))]

    if not image_files:
        logger.error(f"No images found in {test_dir}")
        return

    # Preload up to 100 images (files 300..399) so disk I/O does not
    # skew the timing. (The old comment claimed 50 images; the slice
    # actually selects up to 100.)
    test_images = []
    for img_file in image_files[300:400]:
        img_path = os.path.join(test_dir, img_file)
        img = cv2.imread(img_path)
        if img is not None:
            test_images.append(img)

    if not test_images:
        logger.error("No readable images in the [300:400] slice of the directory")
        return

    logger.info(f"Loaded {len(test_images)} test images")

    # Initialize only after we know there is something to benchmark.
    seg_pose = UltraFastSegPose()
    try:
        # Warm-up so steady-state throughput is measured, not cold start.
        logger.info("Warming up...")
        for img in test_images[:5]:
            seg_pose.get_result(img)

        total_processed = 0
        total_time = 0.0

        for round_num in range(rounds):
            logger.info(f"Round {round_num + 1}/{rounds}")
            round_start = time.time()

            for img in test_images:
                start = time.time()
                seg_result, pose_result = seg_pose.get_result(img)
                total_time += time.time() - start
                total_processed += 1

            round_time = time.time() - round_start
            round_fps = len(test_images) / round_time
            logger.info(f"Round {round_num + 1} FPS: {round_fps:.2f}")

        # Final results
        avg_fps = total_processed / total_time if total_time > 0 else 0
        stats = seg_pose.get_stats()

        logger.info(f"\nBenchmark Results:")
        logger.info(f"Total processed: {total_processed}")
        logger.info(f"Average FPS: {avg_fps:.2f}")
        logger.info(f"System FPS: {stats['avg_fps']:.2f}")
        logger.info(f"Success rate: {stats['success_rate']*100:.1f}%")
    finally:
        # Always release the NPU workers, even if a round raised.
        seg_pose.shutdown()


if __name__ == "__main__":
    benchmark_segpose()