import os
import shutil
import threading
import time
from queue import Empty, Queue
from typing import Callable, Dict, List, Optional

from watchdog.events import FileSystemEventHandler, FileSystemEvent
from watchdog.observers import Observer

from .ignore import is_ignored


class DebouncedHandler(FileSystemEventHandler):
    """Watchdog handler that batches filesystem events with a debounce timer.

    Each event is converted to a path relative to ``source_dir`` (forward
    slashes), filtered against ``ignore_patterns``, and queued.  Every new
    event restarts the debounce timer; when the timer finally fires, all
    queued paths are delivered to ``on_batch`` as one sorted, de-duplicated
    list.  Setting ``stop_event`` makes the handler drop further events.
    """

    def __init__(self, source_dir: str, ignore_patterns: List[str], on_batch: Callable[[List[str]], None], stop_event: threading.Event, debounce_seconds: float = 0.5) -> None:
        super().__init__()
        self.source_dir = os.path.abspath(source_dir)
        self.ignore_patterns = ignore_patterns
        self.on_batch = on_batch
        self.stop_event = stop_event
        self.debounce_seconds = debounce_seconds
        self._queue: "Queue[str]" = Queue()
        # Pending debounce timer; guarded by ``_lock``.
        self._timer: Optional[threading.Timer] = None
        self._lock = threading.Lock()

    def _enqueue(self, path: str) -> None:
        """Queue *path* (made relative to the source dir) and restart the debounce timer."""
        if self.stop_event.is_set():
            # Return silently to avoid flooding the log during shutdown.
            return
        rel = os.path.relpath(path, self.source_dir).replace("\\", "/")
        if rel.startswith(".."):
            # Path lies outside the watched tree; nothing to sync.
            return
        if is_ignored(rel, self.ignore_patterns):
            return
        self._queue.put(rel)
        with self._lock:
            # Restart the debounce window: cancel any pending timer and arm
            # a fresh one.  Note Timer.cancel() cannot stop a timer whose
            # callback is already running, so _flush must tolerate racing.
            if self._timer:
                self._timer.cancel()
            self._timer = threading.Timer(self.debounce_seconds, self._flush)
            self._timer.daemon = True
            self._timer.start()

    def _flush(self) -> None:
        """Timer callback: drain the queue and hand the batch to ``on_batch``."""
        if self.stop_event.is_set():
            # Shutting down: cancel any pending timer and drop queued events.
            with self._lock:
                if self._timer:
                    self._timer.cancel()
                    self._timer = None
            return
        batch: List[str] = []
        # Drain via get_nowait()/Empty rather than polling empty(), which is
        # racy if two timer callbacks ever overlap.
        while True:
            try:
                batch.append(self._queue.get_nowait())
            except Empty:
                break
        if batch:
            self.on_batch(sorted(set(batch)))

    def on_created(self, event: FileSystemEvent) -> None:
        # Directory creations are enqueued too, so consumers can mirror them.
        self._enqueue(event.src_path)

    def on_modified(self, event: FileSystemEvent) -> None:
        # Directory "modified" events are noisy and carry no content change.
        if event.is_directory:
            return
        self._enqueue(event.src_path)

    def on_moved(self, event: FileSystemEvent) -> None:
        # A move is a deletion at src plus a creation at dest.
        self._enqueue(event.src_path)
        self._enqueue(event.dest_path)

    def on_deleted(self, event: FileSystemEvent) -> None:
        self._enqueue(event.src_path)


def start_realtime_sync(source_dir: str, target_dirs: List[str], ignore_patterns: List[str], stop_event: threading.Event, logger: Callable[[str], None], on_stats: Optional[Callable[[int], None]] = None) -> Observer:
    """Watch ``source_dir`` and mirror debounced change batches into ``target_dirs``.

    For each batched relative path: deletions are propagated (file or whole
    directory), directories are created, and files are copied with metadata
    (``shutil.copy2``).  A non-daemon monitor thread polls ``stop_event`` and
    shuts the observer down when it is set.

    Args:
        source_dir: Directory to watch; made absolute.
        target_dirs: Mirror destinations; every batch is applied to all of them.
        ignore_patterns: Patterns consumed by ``is_ignored`` to filter paths.
        stop_event: Cooperative shutdown signal shared with the caller.
        logger: Receives progress and error messages.
        on_stats: Optional callback invoked with the number of paths in each
            processed batch.  NOTE(review): previously accepted but never
            called; confirm the intended metric with callers.

    Returns:
        The started watchdog ``Observer``.
    """
    source_dir = os.path.abspath(source_dir)

    def handle_batch(rel_paths: List[str]) -> None:
        # Apply one debounced batch of relative paths to every target.
        logger(f"[HANDLE_BATCH] 处理批次，共{len(rel_paths)}个文件")
        if stop_event.is_set():
            logger("[HANDLE_BATCH] stop_event已设置，退出")
            return
        for rel in rel_paths:
            if stop_event.is_set():
                break
            src = os.path.join(source_dir, rel)
            for target_dir in target_dirs:
                if stop_event.is_set():
                    break
                dst = os.path.join(target_dir, rel)
                try:
                    if not os.path.exists(src):
                        # Source vanished: propagate the deletion.
                        if os.path.isdir(dst):
                            shutil.rmtree(dst, ignore_errors=True)
                        elif os.path.exists(dst):
                            os.remove(dst)
                        continue

                    if os.path.isdir(src):
                        os.makedirs(dst, exist_ok=True)
                        continue

                    # Ensure the parent directory exists before copying.
                    parent_dir = os.path.dirname(dst)
                    if parent_dir:
                        os.makedirs(parent_dir, exist_ok=True)
                    shutil.copy2(src, dst)
                except Exception as exc:
                    # Best-effort sync: log and keep going with the batch.
                    logger(f"错误: {exc}")
        # Report batch size; the callback was previously declared but unused.
        if on_stats is not None:
            try:
                on_stats(len(rel_paths))
            except Exception as exc:
                logger(f"错误: {exc}")

    logger("[START_REALTIME_SYNC] 创建 observer 和 handler")
    handler = DebouncedHandler(source_dir, ignore_patterns, handle_batch, stop_event)
    observer = Observer()
    observer.schedule(handler, source_dir, recursive=True)
    observer.start()
    logger("[START_REALTIME_SYNC] observer 已启动")

    def monitor() -> None:
        # Poll for shutdown, then tear the observer down exactly once.
        try:
            logger("[MONITOR] 启动")
            while not stop_event.is_set():
                time.sleep(0.2)
            logger("[MONITOR] 收到停止信号，退出循环")
        finally:
            logger("[MONITOR] 开始清理 observer")
            observer.stop()
            # Bounded join so a wedged observer cannot hang shutdown forever.
            observer.join(timeout=5)
            logger("[MONITOR] 清理完成")

    # Non-daemon on purpose: keeps the process alive until stop_event is set.
    t = threading.Thread(target=monitor, daemon=False)
    t.start()
    return observer


class FileMonitor:
    """Wraps realtime sync monitoring for an engine (source/targets/patterns).

    The engine is expected to expose ``source_dir``, ``target_dirs`` and
    ``ignore_patterns``; ``callback`` (if given) receives log messages.
    """

    def __init__(self, engine, callback: Optional[Callable] = None):
        self.engine = engine
        self.callback = callback
        # Cooperative shutdown flag; replaced on restart (see start()).
        self.stop_event = threading.Event()
        # Set by start(); string annotation keeps the watchdog name lazy.
        self.observer: "Optional[Observer]" = None
        self.monitor_thread: Optional[threading.Thread] = None

    def _log(self, message: str):
        """Forward *message* to the callback, best-effort."""
        if self.callback:
            try:
                self.callback(message)
            except TypeError:
                # Tolerate callbacks with an incompatible signature.
                pass

    def start(self):
        """Start monitoring.  Safe to call again after stop()."""
        if self.stop_event.is_set():
            # A previous stop() left the event set; without a fresh event the
            # new monitor loop would exit (and stop the observer) immediately.
            self.stop_event = threading.Event()
        self._log("[FILE_MONITOR] 启动文件监控")
        self.observer = start_realtime_sync(
            self.engine.source_dir,
            self.engine.target_dirs,
            self.engine.ignore_patterns,
            self.stop_event,
            self._log
        )

    def stop(self):
        """Signal shutdown; the monitor thread stops the observer itself."""
        self._log("[FILE_MONITOR] 设置 stop_event")
        self.stop_event.set()