"""
并发管理器
解决文件系统通信的并发安全问题
"""

import fcntl
import logging
import shutil
import tempfile
import threading
import time
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Optional

class ConcurrentManager:
    """Thread-safe manager of per-request working directories.

    Each request gets a unique id, an exclusive on-disk directory under
    ``base_dir`` guarded by an ``fcntl`` lock file, and automatic cleanup
    when the request finishes or exceeds its timeout (handled by a daemon
    background thread).

    NOTE(review): ``fcntl`` makes this module POSIX-only — confirm Windows
    is out of scope.
    """

    def __init__(self, base_dir: Optional[Path] = None):
        """Create the manager and start the timeout-cleanup daemon thread.

        Args:
            base_dir: Root directory for request workspaces. Defaults to a
                fixed folder under the system temp directory.
        """
        self.logger = logging.getLogger(__name__)
        self.base_dir = base_dir or Path(tempfile.gettempdir()) / "md2ppt_communication"
        # parents=True so a caller-supplied nested path also works.
        self.base_dir.mkdir(parents=True, exist_ok=True)

        # Registry of in-flight requests; RLock so cleanup paths that
        # re-enter from the same thread cannot deadlock.
        self._active_requests: Dict[str, Dict[str, Any]] = {}
        self._lock = threading.RLock()

        # Daemon thread reaping requests that outlive their timeout.
        self._cleanup_thread = threading.Thread(target=self._cleanup_worker, daemon=True)
        self._cleanup_thread.start()

    def generate_safe_request_id(self) -> str:
        """Return a collision-safe request id (uuid4 hex + unix timestamp)."""
        return f"req_{uuid.uuid4().hex}_{int(time.time())}"

    @contextmanager
    def acquire_request_slot(self, request_id: str, timeout: int = 300):
        """Reserve an exclusive working directory for ``request_id``.

        Yields the request directory. The registry lock is held only while
        registering the request — NOT across the ``yield`` — so multiple
        requests can be active concurrently. (The original held ``_lock``
        for the whole context, serializing every request.)

        Args:
            request_id: Unique request id; a duplicate raises ``ValueError``.
            timeout: Seconds before the background worker reaps the request.

        Raises:
            ValueError: if ``request_id`` is already active.
            RuntimeError: if the exclusive file lock cannot be taken.
        """
        request_dir = self.base_dir / request_id
        lock_file = request_dir / "request.lock"

        with self._lock:
            if request_id in self._active_requests:
                raise ValueError(f"请求ID已存在: {request_id}")

            request_dir.mkdir(exist_ok=True)

            self._active_requests[request_id] = {
                "start_time": time.time(),
                "timeout": timeout,
                "dir": request_dir,
                "lock_file": lock_file
            }

        try:
            with open(lock_file, 'w') as f:
                # Narrow try: only the flock call means "lock busy". A
                # BlockingIOError raised by caller code inside the context
                # must not be misreported as a lock-acquisition failure.
                try:
                    fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
                except BlockingIOError:
                    raise RuntimeError(f"无法获取请求锁: {request_id}")
                f.write(f"Request {request_id} started at {time.time()}")

                self.logger.debug(f"获取请求槽位: {request_id}")
                yield request_dir
        finally:
            # Always deregister and remove the directory, even on error.
            self._cleanup_request(request_id)

    def _cleanup_request(self, request_id: str):
        """Deregister ``request_id`` and delete its on-disk directory."""
        with self._lock:
            request_info = self._active_requests.pop(request_id, None)

        if request_info is None:
            return

        # Filesystem work happens outside the registry lock so slow disk
        # I/O cannot stall other threads registering requests.
        request_dir = request_info["dir"]
        try:
            # shutil.rmtree handles nested subdirectories; the original
            # unlink-files-then-rmdir pass raised whenever a request had
            # created a subfolder inside its directory.
            if request_dir.exists():
                shutil.rmtree(request_dir, ignore_errors=True)

            self.logger.debug(f"清理请求: {request_id}")
        except Exception as e:
            self.logger.warning(f"清理请求失败 {request_id}: {e}")

    def _cleanup_worker(self):
        """Background loop: every 30s, reap requests past their timeout."""
        while True:
            try:
                now = time.time()

                # Snapshot expired ids under the lock; delete outside it.
                with self._lock:
                    expired_requests = [
                        request_id
                        for request_id, info in self._active_requests.items()
                        if now - info["start_time"] > info["timeout"]
                    ]

                for request_id in expired_requests:
                    self.logger.warning(f"清理超时请求: {request_id}")
                    self._cleanup_request(request_id)

                time.sleep(30)

            except Exception as e:
                self.logger.error(f"清理工作线程异常: {e}")
                time.sleep(60)  # back off after an unexpected failure

    def get_active_request_count(self) -> int:
        """Return the number of currently registered requests."""
        with self._lock:
            return len(self._active_requests)

    def force_cleanup_all(self):
        """Immediately remove every active request (e.g. on shutdown)."""
        with self._lock:
            request_ids = list(self._active_requests.keys())

        for request_id in request_ids:
            self._cleanup_request(request_id)

        self.logger.info(f"强制清理了 {len(request_ids)} 个请求")

# Global singleton instance (created lazily) and the lock guarding its
# first creation.
_concurrent_manager = None
_concurrent_manager_lock = threading.Lock()

def get_concurrent_manager() -> ConcurrentManager:
    """Return the process-wide ConcurrentManager, creating it on first use.

    Uses double-checked locking: the common already-created path stays
    lock-free, while concurrent first calls cannot race and build two
    managers (each of which would leak its own cleanup daemon thread) —
    the original unguarded ``is None`` check was racy under threads.
    """
    global _concurrent_manager
    if _concurrent_manager is None:
        with _concurrent_manager_lock:
            if _concurrent_manager is None:
                _concurrent_manager = ConcurrentManager()
    return _concurrent_manager