import os
import logging
import threading
import time
from datetime import datetime
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler
from sync_handler import SyncHandler
from database import db_manager
from ftp_manager import ftp_manager
from file_syncer import FileSyncer
from websocket_manager import websocket_manager
import ftplib

# Module-level logger for the 'sync' channel, shared by every method below.
sync_logger = logging.getLogger('sync')

class SyncManager:
    """Coordinate filesystem watchers and periodic FTP server checks for
    all configured sync directories.

    A single watchdog observer thread services every watched directory;
    each directory is tracked by a SyncHandler (and, lazily, a FileSyncer)
    keyed by its database id in ``self.handlers``. A daemon thread polls
    the server for remote changes every ``server_check_interval`` seconds.
    """

    def __init__(self):
        # Use PollingObserver on Windows (native file-system events are
        # unreliable there); use the default Observer elsewhere.
        if os.name == 'nt':
            self.observer = PollingObserver()
        else:
            self.observer = Observer()
        self.handlers = {}  # sync_dir_id -> SyncHandler
        self.server_check_interval = 30  # seconds between server-change checks
        self._stop_flag = False
        self._server_check_thread = None
        self.ignore_patterns = ['.svn', '.git', '.DS_Store', 'Thumbs.db']
        self.auto_sync_on_start = False  # automatic sync disabled by default

    def start(self):
        """Start the sync manager.

        Starts the main observer thread, schedules a watcher per configured
        directory, and launches the server-check thread. On any failure
        everything started so far is stopped and the exception re-raised.
        """
        try:
            sync_logger.info("初始化同步管理器")

            # Drop any handlers left over from a previous run.
            self.handlers.clear()

            # Create and start the main observer thread.
            sync_logger.info("创建和启动主观察者")
            self.observer.start()
            sync_logger.info("主观察者启动成功")

            # Schedule a watcher for every sync directory in the database.
            sync_logger.info("开始启动所有目录观察者")
            self._start_all_watchers()
            sync_logger.info("所有目录观察者启动完成")

            # Start the periodic server-change check thread.
            self._start_server_check_thread()
            sync_logger.info("服务器检查线程启动成功")

        except Exception as e:
            sync_logger.error(f"启动同步管理器失败: {str(e)}")
            self.stop()
            raise

    def _start_server_check_thread(self):
        """Spawn the daemon thread that periodically asks the FTP server
        for changes on every sync directory with an active strategy."""
        def check_server():
            force_sync_counter = 0  # number of checks performed so far
            force_sync_interval = 5  # force a full sync on every 5th check

            while not self._stop_flag:
                try:
                    force_sync_counter += 1
                    force_sync = (force_sync_counter % force_sync_interval == 0)
                    if force_sync:
                        sync_logger.info(f"执行第 {force_sync_counter} 次检查，将进行强制同步")

                    # Snapshot all sync directories.
                    with db_manager.get_connection() as db:
                        rows = db.execute('SELECT id, sync_strategy FROM sync_dirs').fetchall()

                        # Ids still present in the database.
                        active_dir_ids = set(row['id'] for row in rows)

                        # Remove handlers whose directory was deleted from the
                        # database. stop_watcher() already pops the handler out
                        # of self.handlers, so deleting it again here would
                        # raise KeyError (which previously aborted the whole
                        # check iteration via the outer except).
                        to_remove = [dir_id for dir_id in self.handlers.keys() if dir_id not in active_dir_ids]
                        for dir_id in to_remove:
                            sync_logger.info(f"移除不存在的同步目录处理器: {dir_id}")
                            self.stop_watcher(dir_id)

                    # Check every directory for server-side changes, regardless
                    # of the auto_sync_on_start setting.
                    for row in rows:
                        # Extract the row values before the try block so the
                        # except clause below can always name sync_dir_id.
                        sync_dir_id = row['id']
                        strategy = row['sync_strategy']
                        try:
                            # Skip directories deleted since the snapshot above.
                            with db_manager.get_connection() as db:
                                dir_exists = db.execute('SELECT 1 FROM sync_dirs WHERE id = ?', (sync_dir_id,)).fetchone() is not None
                                if not dir_exists:
                                    sync_logger.info(f"跳过已删除的同步目录: {sync_dir_id}")
                                    continue

                            # Only sync when a real strategy is configured.
                            if strategy and strategy != 'none':
                                handler = self.handlers.get(sync_dir_id)

                                if handler:
                                    # Lazily create the syncer if it is missing.
                                    if not handler.syncer:
                                        try:
                                            handler.syncer = FileSyncer(sync_dir_id, db_manager.get_db_path())
                                            sync_logger.info(f"为处理器 {sync_dir_id} 创建新的同步器")
                                        except Exception as syncer_e:
                                            sync_logger.error(f"创建同步器失败: {str(syncer_e)}")
                                            continue

                                    # Server-priority directories force-sync on
                                    # every even-numbered check as well, i.e.
                                    # more often than the base interval.
                                    strategy_specific_force = force_sync
                                    if strategy == 'server_priority' and force_sync_counter % 2 == 0:
                                        strategy_specific_force = True
                                        sync_logger.info(f"目录 {sync_dir_id} 使用服务器优先策略，额外进行强制同步")

                                    sync_logger.info(f"检查目录 {sync_dir_id} 的服务器变化 (策略: {strategy}, 强制同步: {strategy_specific_force})")
                                    handler.syncer.check_server_changes(force_sync=strategy_specific_force)
                                else:
                                    sync_logger.warning(f"目录 {sync_dir_id} 没有处理器，跳过服务器检查")
                            else:
                                sync_logger.info(f"跳过目录 {sync_dir_id} 的服务器检查，未设置同步策略")
                        except Exception as e:
                            sync_logger.error(f"检查目录 {sync_dir_id} 的服务器变化失败: {str(e)}")
                            continue

                except Exception as e:
                    sync_logger.error(f"服务器检查线程错误: {str(e)}")

                # Sleep in 0.5s slices so a stop request is noticed promptly.
                for _ in range(self.server_check_interval * 2):
                    if self._stop_flag:
                        break
                    time.sleep(0.5)

        self._stop_flag = False
        self._server_check_thread = threading.Thread(target=check_server)
        self._server_check_thread.daemon = True
        self._server_check_thread.start()

    def _start_all_watchers(self):
        """Schedule a watcher for every sync directory in the database."""
        try:
            with db_manager.get_connection() as db:
                rows = db.execute('SELECT id, local_path FROM sync_dirs').fetchall()

            for row in rows:
                self.start_watcher(row['id'], row['local_path'])

        except Exception as e:
            sync_logger.error(f"启动观察者失败: {str(e)}")
            raise

    def start_watcher(self, sync_dir_id, local_path):
        """Schedule a watchdog handler for one directory, replacing any
        existing handler registered under the same id."""
        try:
            # Tear down an existing watcher first; stop_watcher() unschedules
            # it, drops its syncer and removes it from self.handlers.
            if sync_dir_id in self.handlers:
                self.stop_watcher(sync_dir_id)

            # Create and schedule a fresh handler.
            handler = SyncHandler(sync_dir_id, self.ignore_patterns)
            schedule = self.observer.schedule(handler, local_path, recursive=True)
            handler.observer_schedule = schedule
            self.handlers[sync_dir_id] = handler

            # Read the sync strategy for logging only; no sync is run here.
            try:
                syncer = FileSyncer(sync_dir_id, db_manager.get_db_path())
                strategy = syncer.sync_info.get('sync_strategy')

                if strategy and strategy != 'none':
                    sync_logger.info(f"观察者已启动，同步策略: dir_id={sync_dir_id}, strategy={strategy}")
                else:
                    sync_logger.info(f"观察者已启动，等待设置同步策略: dir_id={sync_dir_id}")

            except Exception as e:
                sync_logger.error(f"获取同步策略失败: {str(e)}")

            sync_logger.info(f"观察者启动成功: dir_id={sync_dir_id}, path={local_path}")

        except Exception as e:
            sync_logger.error(f"启动观察者失败: dir_id={sync_dir_id}, path={local_path}, 错误: {str(e)}")
            raise

    def restart_watcher(self, sync_dir_id):
        """Restart the watcher for one directory, re-reading its local
        path from the database."""
        try:
            with db_manager.get_connection() as db:
                row = db.execute('SELECT local_path FROM sync_dirs WHERE id = ?', (sync_dir_id,)).fetchone()
                if row:
                    self.start_watcher(sync_dir_id, row['local_path'])
                    sync_logger.info(f"观察者重启成功: dir_id={sync_dir_id}")
                else:
                    sync_logger.error(f"找不到同步目录: dir_id={sync_dir_id}")
        except Exception as e:
            sync_logger.error(f"重启观察者失败: dir_id={sync_dir_id}, 错误: {str(e)}")
            raise

    def stop(self):
        """Stop the server-check thread and the observer, then discard
        all handlers."""
        try:
            # Signal the check thread and wait briefly for it to exit.
            self._stop_flag = True
            if self._server_check_thread:
                self._server_check_thread.join(timeout=5)

            # Stop the observer thread.
            self.observer.stop()
            self.observer.join(timeout=5)

            # Drop all handlers.
            self.handlers.clear()

        except Exception as e:
            sync_logger.error(f"停止同步管理器失败: {str(e)}")
            raise

    def add_sync_dir(self, user_id, local_path, ftp_host, ftp_user, ftp_pass, ftp_path, sync_strategy, ignore_patterns=None):
        """Register a new sync directory.

        Validates the local path and the FTP target (creating the remote
        directory if needed), inserts the record, starts a watcher, and —
        when a real strategy is set — runs an initial sync.

        Returns the new row id; raises ValueError on validation failure.
        """
        try:
            # The local path must exist and be a directory.
            if not os.path.exists(local_path) or not os.path.isdir(local_path):
                raise ValueError("本地路径不存在或不是目录")

            # Validate the FTP connection and target directory.
            try:
                with ftp_manager.connection(ftp_host, ftp_user, ftp_pass) as ftp:
                    # Try to enter the target directory.
                    try:
                        ftp.cwd(ftp_path)
                    except ftplib.error_perm:
                        # Directory missing — try to create it.
                        try:
                            ftp.mkd(ftp_path)
                            ftp.cwd(ftp_path)
                        except ftplib.error_perm as e:
                            raise ValueError(f"无法创建FTP目录: {str(e)}")
            except Exception as e:
                raise ValueError(f"FTP连接失败: {str(e)}")

            # Insert the record.
            with db_manager.get_connection() as db:
                # Initial status depends on whether a real strategy is set.
                sync_status = 'active' if sync_strategy and sync_strategy != 'none' else 'pending'

                cursor = db.execute('''
                    INSERT INTO sync_dirs 
                    (user_id, local_path, ftp_host, ftp_user, ftp_pass, ftp_path, sync_strategy, sync_status) 
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                ''', (user_id, local_path, ftp_host, ftp_user, ftp_pass, ftp_path, sync_strategy, sync_status))
                sync_dir_id = cursor.lastrowid
                db.commit()

            # Start watching the directory.
            self.start_watcher(sync_dir_id, local_path)

            # With a real strategy, run the initial sync immediately;
            # failures here are logged but do not abort the add.
            if sync_strategy and sync_strategy != 'none':
                try:
                    handler = self.handlers.get(sync_dir_id)
                    if handler:
                        if not handler.syncer:
                            handler.syncer = FileSyncer(sync_dir_id, db_manager.get_db_path())
                        else:
                            handler.syncer.refresh_sync_info()

                        sync_logger.info(f"执行初始同步: dir_id={sync_dir_id}, strategy={sync_strategy}")
                        force_sync = (sync_strategy == 'server_priority')
                        handler.syncer.check_server_changes(force_sync=force_sync)
                except Exception as e:
                    sync_logger.error(f"执行初始同步失败: {str(e)}")

            sync_logger.info(f"同步目录添加成功: id={sync_dir_id}, strategy={sync_strategy}, status={sync_status}")

            return sync_dir_id

        except Exception as e:
            sync_logger.error(f"添加同步目录失败: {str(e)}")
            raise

    def remove_sync_dir(self, sync_dir_id):
        """Remove a sync directory: stop its watcher, delete its database
        rows, and evict any cached FileSyncer instance."""
        try:
            # Stop the watcher; this also removes the handler entry.
            self.stop_watcher(sync_dir_id)

            # Delete the directory and its history from the database.
            with db_manager.get_connection() as db:
                db.execute('DELETE FROM sync_dirs WHERE id = ?', (sync_dir_id,))
                db.execute('DELETE FROM sync_history WHERE sync_dir_id = ?', (sync_dir_id,))
                db.commit()

            # Evict the cached FileSyncer. Membership is re-checked under
            # the lock to avoid racing with concurrent instance creation.
            if hasattr(FileSyncer, '_instances'):
                with FileSyncer._lock:
                    if sync_dir_id in FileSyncer._instances:
                        del FileSyncer._instances[sync_dir_id]
                        sync_logger.info(f"已清理FileSyncer实例 ID: {sync_dir_id}")

        except Exception as e:
            sync_logger.error(f"移除同步目录失败: sync_dir_id={sync_dir_id}, error={str(e)}")
            raise

    def get_sync_status(self, sync_dir_id):
        """Return the directory's status dict (status, last_sync,
        sync_strategy), or None if the id is unknown."""
        try:
            with db_manager.get_connection() as db:
                row = db.execute('''
                    SELECT sync_status, last_sync, sync_strategy
                    FROM sync_dirs 
                    WHERE id = ?
                ''', (sync_dir_id,)).fetchone()

                if row:
                    # Empty status falls back to a default derived from
                    # the configured strategy.
                    status = row['sync_status']
                    if not status:
                        if row['sync_strategy'] and row['sync_strategy'] != 'none':
                            status = 'active'
                        else:
                            status = 'pending'

                    return {
                        'status': status,
                        'last_sync': row['last_sync'],
                        'sync_strategy': row['sync_strategy']
                    }
                return None

        except Exception as e:
            sync_logger.error(f"获取同步状态失败: {str(e)}")
            raise

    def get_sync_history(self, sync_dir_id):
        """Return up to 100 most recent sync-history rows as dicts."""
        try:
            with db_manager.get_connection() as db:
                rows = db.execute('''
                    SELECT filename, direction, operation, timestamp, path
                    FROM sync_history
                    WHERE sync_dir_id = ?
                    ORDER BY timestamp DESC
                    LIMIT 100
                ''', (sync_dir_id,)).fetchall()

                return [dict(row) for row in rows]

        except Exception as e:
            sync_logger.error(f"获取同步历史失败: {str(e)}")
            raise

    def list_sync_dirs(self):
        """Return all sync-directory rows as dicts."""
        try:
            with db_manager.get_connection() as db:
                rows = db.execute('''
                    SELECT id, local_path, ftp_host, ftp_path, sync_status, last_sync
                    FROM sync_dirs
                ''').fetchall()

                return [dict(row) for row in rows]

        except Exception as e:
            sync_logger.error(f"列出同步目录失败: {str(e)}")
            raise

    def update_sync_status(self, sync_dir_id, status):
        """Persist a new sync status (stamping last_sync with the current
        time) and broadcast it over WebSocket."""
        try:
            with db_manager.get_connection() as db:
                current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                db.execute('''
                    UPDATE sync_dirs 
                    SET sync_status = ?, last_sync = ?
                    WHERE id = ?
                ''', (status, current_time, sync_dir_id))
                db.commit()  # make sure the change is committed

                # Push the update to WebSocket clients.
                websocket_manager.emit_sync_status(
                    sync_dir_id,
                    status,
                    f"同步状态已更新为: {status}"
                )
                sync_logger.info(f"同步状态已更新: sync_dir_id={sync_dir_id}, status={status}")

        except Exception as e:
            sync_logger.error(f"更新同步状态失败: sync_dir_id={sync_dir_id}, error={str(e)}")
            raise

    def stop_watcher(self, sync_dir_id):
        """Unschedule and forget the watcher for one directory.

        No-op when there is no handler for the id. Removes the handler
        from ``self.handlers`` — callers must not delete it again.
        """
        try:
            if sync_dir_id in self.handlers:
                sync_logger.info(f"停止观察者: sync_dir_id={sync_dir_id}")
                handler = self.handlers[sync_dir_id]
                self.observer.unschedule(handler.observer_schedule)
                handler.syncer = None
                del self.handlers[sync_dir_id]

        except Exception as e:
            sync_logger.error(f"停止观察者失败: sync_dir_id={sync_dir_id}, error={str(e)}")
            raise

    def enable_auto_sync(self, sync_dir_id=None):
        """Enable automatic syncing.

        Args:
            sync_dir_id: if given, additionally run an immediate forced
                sync for that directory; if None, only the global flag
                is enabled.
        """
        self.auto_sync_on_start = True
        sync_logger.info("已启用自动同步功能")

        # For a specific directory, run a forced sync right away.
        if sync_dir_id is not None:
            try:
                handler = self.handlers.get(sync_dir_id)
                if handler and handler.syncer:
                    sync_logger.info(f"立即执行目录 {sync_dir_id} 的强制同步")
                    handler.syncer.check_server_changes(force_sync=True)
            except Exception as e:
                sync_logger.error(f"执行目录 {sync_dir_id} 的强制同步失败: {str(e)}")

    def disable_auto_sync(self):
        """Disable automatic syncing."""
        self.auto_sync_on_start = False
        sync_logger.info("已禁用自动同步功能")

    def update_sync_strategy(self, sync_dir_id, sync_strategy):
        """Update a directory's sync strategy and, when it becomes active,
        run an initial sync.

        Args:
            sync_dir_id: sync directory id
            sync_strategy: one of 'local_priority', 'server_priority',
                'time_size', 'none'

        Returns:
            True on success, False on failure (the error is logged).
        """
        try:
            # Persist the new strategy.
            with db_manager.get_connection() as db:
                db.execute('''
                    UPDATE sync_dirs 
                    SET sync_strategy = ? 
                    WHERE id = ?
                ''', (sync_strategy, sync_dir_id))
                db.commit()

            sync_logger.info(f"同步策略已更新: dir_id={sync_dir_id}, strategy={sync_strategy}")

            # With a real strategy, run an initial sync now.
            if sync_strategy and sync_strategy != 'none':
                handler = self.handlers.get(sync_dir_id)
                if handler:
                    if not handler.syncer:
                        handler.syncer = FileSyncer(sync_dir_id, db_manager.get_db_path())
                    else:
                        # Refresh the syncer's cached sync info.
                        handler.syncer.refresh_sync_info()

                    sync_logger.info(f"执行策略变更后的初始同步: dir_id={sync_dir_id}, strategy={sync_strategy}")
                    # Server-priority strategy forces the sync.
                    force_sync = (sync_strategy == 'server_priority')
                    handler.syncer.check_server_changes(force_sync=force_sync)

                    # Mark the directory active.
                    self.update_sync_status(sync_dir_id, 'active')
                else:
                    sync_logger.warning(f"找不到同步目录的处理器: dir_id={sync_dir_id}")
            else:
                # Strategy 'none' puts the directory back to pending.
                self.update_sync_status(sync_dir_id, 'pending')

            return True

        except Exception as e:
            sync_logger.error(f"更新同步策略失败: dir_id={sync_dir_id}, strategy={sync_strategy}, 错误: {str(e)}")
            return False