#!/usr/bin/env python3
"""
ES索引迁移工具 - Web界面后端API
提供配置接口和实时日志展示功能
"""

import json
import logging
import threading
import time
import uuid
from datetime import datetime, timedelta
from queue import Queue, Empty
from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO, emit
from es_index_migrator import ESIndexMigrator
from elasticsearch import Elasticsearch
import os # Added for file path handling
from elasticsearch.exceptions import NotFoundError # Added for NotFoundError

# Flask + Socket.IO application setup.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — should be loaded from env/config in production.
app.config['SECRET_KEY'] = 'es_migration_tool_secret'
# 'threading' async mode so plain daemon worker threads can emit over WebSocket.
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='threading')

# Global state.
# NOTE(review): migration_status looks like a legacy single-run status dict;
# nothing in the visible task-based flow reads it — confirm before removing.
migration_status = {
    'running': False,
    'current_step': '',
    'results': None
}

# Buffer of log entries produced by WebLogHandler (also pushed via Socket.IO).
log_queue = Queue()

# Task manager: thread-safe registry of migration/reindex tasks and their progress.
class TaskManager:
    """Thread-safe, in-memory registry of migration/reindex tasks.

    Every task is a plain dict; all reads and writes are guarded by
    ``self.lock``. ``update_task`` additionally broadcasts the updated task
    over Socket.IO so connected browsers see live progress.
    """

    def __init__(self):
        # task_id (uuid4 string) -> task dict
        self.tasks = {}
        self.lock = threading.Lock()
    
    def create_task(self, task_type, description, config):
        """Register a new task record and return its generated id.

        Args:
            task_type: 'migration' or 'reindex'.
            description: human-readable label shown in the task list.
            config: dict carrying source/target configs and selected indices.

        Returns:
            The new task's uuid4 string id.
        """
        task_id = str(uuid.uuid4())
        task = {
            'id': task_id,
            'type': task_type,  # 'migration', 'reindex'
            'description': description,
            'status': 'pending',  # pending, running, completed, failed
            'progress': 0,
            'current_step': '',
            'created_at': datetime.now().isoformat(),
            'started_at': None,
            'completed_at': None,
            'config': config,
            'results': None,
            'error': None,
            'indices': config.get('indices', []),
            'total_indices': len(config.get('indices', [])),
            'processed_indices': 0,
            'es_tasks': {},  # ES-side task ids: {index_name: {'task_id': 'xxx', 'start_time': 'xxx'}}
            'detailed_progress': {},  # per-index progress: {index_name: {'docs_processed': 0, 'total_docs': 0, 'rate': 0, 'eta': 0}}
            'estimated_completion': None,  # ISO timestamp of the projected finish
            'total_docs': 0,
            'processed_docs': 0,
            'process_logs': [],  # rolling processing log (last 100 entries kept)
            'index_results': {}  # per-index outcome: {index_name: {'status': 'success/failed', 'message': 'xxx', 'docs_count': 0}}
        }
        
        with self.lock:
            self.tasks[task_id] = task
        
        return task_id
    
    def update_task(self, task_id, **kwargs):
        """Merge ``kwargs`` into the task dict and broadcast the new state."""
        with self.lock:
            if task_id in self.tasks:
                self.tasks[task_id].update(kwargs)
                # Push the full task state to the web UI via WebSocket.
                # NOTE(review): emitted while holding the lock — presumably
                # fine with async_mode='threading', but verify under load.
                socketio.emit('task_update', self.tasks[task_id])
    
    def get_task(self, task_id):
        """Return the task dict (a live, mutable reference) or None."""
        with self.lock:
            return self.tasks.get(task_id)
    
    def get_all_tasks(self):
        """Return a new list of all task dicts (entries are live references)."""
        with self.lock:
            return list(self.tasks.values())
    
    def get_running_tasks(self):
        """Return the tasks currently in the 'running' state."""
        with self.lock:
            return [task for task in self.tasks.values() if task['status'] == 'running']
    
    def update_es_task(self, task_id, index_name, es_task_id):
        """Record the ES-side task id for one index, stamped with start time."""
        with self.lock:
            if task_id in self.tasks:
                self.tasks[task_id]['es_tasks'][index_name] = {
                    'task_id': es_task_id,
                    'start_time': datetime.now().isoformat()
                }
    
    def update_detailed_progress(self, task_id, index_name, progress_info):
        """Store per-index progress and refresh the task-level aggregates."""
        with self.lock:
            if task_id in self.tasks:
                self.tasks[task_id]['detailed_progress'][index_name] = progress_info
                # Recompute overall progress and ETA across all indices.
                self._calculate_overall_progress(task_id)
    
    def _calculate_overall_progress(self, task_id):
        """Aggregate per-index counters into overall progress and an ETA.

        Caller must already hold ``self.lock``.
        """
        task = self.tasks[task_id]
        detailed_progress = task['detailed_progress']
        
        if not detailed_progress:
            return
        
        total_docs = sum(progress.get('total_docs', 0) for progress in detailed_progress.values())
        processed_docs = sum(progress.get('docs_processed', 0) for progress in detailed_progress.values())
        
        task['total_docs'] = total_docs
        task['processed_docs'] = processed_docs
        
        if total_docs > 0:
            overall_progress = int((processed_docs / total_docs) * 100)
            task['progress'] = min(overall_progress, 100)
        
        # Project the completion time from the average throughput so far.
        if task['started_at']:
            start_time = datetime.fromisoformat(task['started_at'])
            elapsed_seconds = (datetime.now() - start_time).total_seconds()
            
            if processed_docs > 0 and elapsed_seconds > 0:
                docs_per_second = processed_docs / elapsed_seconds
                remaining_docs = total_docs - processed_docs
                if docs_per_second > 0:
                    eta_seconds = remaining_docs / docs_per_second
                    estimated_completion = datetime.now() + timedelta(seconds=eta_seconds)
                    task['estimated_completion'] = estimated_completion.isoformat()
    
    def add_task_log(self, task_id, level, message):
        """Append a timestamped log entry, trimming to the 100 most recent."""
        with self.lock:
            if task_id in self.tasks:
                log_entry = {
                    'timestamp': datetime.now().isoformat(),
                    'level': level,
                    'message': message
                }
                self.tasks[task_id]['process_logs'].append(log_entry)
                # Keep only the most recent 100 entries.
                if len(self.tasks[task_id]['process_logs']) > 100:
                    self.tasks[task_id]['process_logs'] = self.tasks[task_id]['process_logs'][-100:]
    
    def update_index_result(self, task_id, index_name, status, message, docs_count=0, conflicts=0):
        """Record the final outcome ('success'/'failed') for one index."""
        with self.lock:
            if task_id in self.tasks:
                self.tasks[task_id]['index_results'][index_name] = {
                    'status': status,
                    'message': message,
                    'docs_count': docs_count,
                    'conflicts': conflicts,
                    'timestamp': datetime.now().isoformat()
                }
# Single shared task registry for the whole process.
task_manager = TaskManager()

# ES task monitor thread: periodically polls running reindex tasks for progress.
def start_es_task_monitor():
    """Spawn a daemon thread that polls running reindex tasks for progress."""
    def _poll_forever():
        while True:
            try:
                for active in task_manager.get_running_tasks():
                    if active['type'] == 'reindex' and active['es_tasks']:
                        update_reindex_progress(active['id'])
                time.sleep(5)  # poll every 5 seconds
            except Exception as e:
                print(f"任务监控错误: {e}")
                time.sleep(10)

    threading.Thread(target=_poll_forever, daemon=True).start()

def update_reindex_progress(task_id):
    """Poll the ES Tasks API for each tracked reindex and publish progress.

    Invoked periodically by the monitor thread for running reindex tasks.
    For every index's ES task id it fetches the status document, logs
    failures and counters, and updates per-index rate/ETA via task_manager.
    """
    task = task_manager.get_task(task_id)
    if not task:
        return
    
    try:
        config = task['config']
        target_es, target_version = create_es_client(config['target_config'])
        
        for index_name, es_task_info in task['es_tasks'].items():
            es_task_id = es_task_info['task_id']
            
            try:
                # Fetch this reindex task's status from the target cluster.
                response = target_es.tasks.get(task_id=es_task_id)
                
                # A top-level 'error' key means the reindex itself failed.
                if 'error' in response:
                    error_info = response['error']
                    error_msg = json.dumps(error_info, ensure_ascii=False)
                    task_manager.add_task_log(task_id, 'ERROR', f'❌ 索引 {index_name} reindex失败: {error_msg}')
                    task_manager.update_index_result(task_id, index_name, 'failed', error_msg)
                    continue
                
                task_info = response.get('task', {})
                
                # First pass: log document failures and a raw-counter summary.
                if 'status' in task_info:
                    status = task_info['status']
                    
                    # Surface per-document failures as warnings.
                    if 'failures' in status and status['failures']:
                        failures = status['failures']
                        failure_msg = json.dumps(failures[:5], ensure_ascii=False)  # only show the first 5 failures
                        task_manager.add_task_log(task_id, 'WARNING', f'⚠️ 索引 {index_name} 有文档失败: {failure_msg}')
                    
                    # Raw counters straight from the ES task status document.
                    progress_info = {
                        'total': status.get('total', 0),
                        'created': status.get('created', 0),
                        'updated': status.get('updated', 0),
                        'deleted': status.get('deleted', 0),
                        'batches': status.get('batches', 0),
                        'version_conflicts': status.get('version_conflicts', 0),
                        'noops': status.get('noops', 0),
                        'retries': status.get('retries', {}).get('bulk', 0),
                        'throttled_millis': status.get('throttled_millis', 0),
                        'requests_per_second': status.get('requests_per_second', -1),
                        'throttled_until_millis': status.get('throttled_until_millis', 0)
                    }
                    
                    task_manager.add_task_log(task_id, 'INFO', 
                        f'📊 索引 {index_name} 进度: '
                        f'总数={progress_info["total"]}, '
                        f'已创建={progress_info["created"]}, '
                        f'已更新={progress_info["updated"]}, '
                        f'已删除={progress_info["deleted"]}, '
                        f'版本冲突={progress_info["version_conflicts"]}'
                    )
                
                # Second pass: derive processed counts, rate and ETA.
                # NOTE(review): this re-reads the same status and rebuilds
                # progress_info; the dict above only feeds the log line.
                # The two passes could likely be merged — verify first.
                if task_info:
                    status = task_info.get('status', {})
                    
                    # Core counters (default 0 when the status is absent).
                    created = status.get('created', 0)
                    updated = status.get('updated', 0)
                    deleted = status.get('deleted', 0)
                    version_conflicts = status.get('version_conflicts', 0)
                    total = status.get('total', 0)
                    
                    processed = created + updated + deleted
                    
                    # Average throughput since the ES task started.
                    start_time = datetime.fromisoformat(es_task_info['start_time'])
                    elapsed = (datetime.now() - start_time).total_seconds()
                    rate = processed / elapsed if elapsed > 0 else 0
                    
                    # Estimated remaining time at the current average rate.
                    remaining = total - processed
                    eta = remaining / rate if rate > 0 else 0
                    
                    progress_info = {
                        'docs_processed': processed,
                        'total_docs': total,
                        'created': created,
                        'updated': updated,
                        'deleted': deleted,
                        'version_conflicts': version_conflicts,
                        'rate': round(rate, 2),
                        'eta_seconds': round(eta),
                        'eta_formatted': format_duration(eta)
                    }
                    
                    task_manager.update_detailed_progress(task_id, index_name, progress_info)
                    
                    # Reflect the completion state in the task's current step.
                    if not response.get('completed', False):
                        # Still running on the ES side.
                        current_step = f"正在reindex {index_name}: {processed}/{total} 文档"
                        task_manager.update_task(task_id, current_step=current_step)
                    else:
                        # ES reports the task as finished.
                        current_step = f"完成 {index_name}: {processed} 文档"
                        task_manager.update_task(task_id, current_step=current_step)
                        
            except Exception as e:
                if "resource_not_found_exception" in str(e):
                    # The ES task already finished and was purged — skip.
                    continue
                else:
                    print(f"查询任务 {es_task_id} 状态失败: {e}")
                    
    except Exception as e:
        print(f"更新任务 {task_id} 进度失败: {e}")

def format_duration(seconds):
    """Render a duration in seconds as a short human-readable string."""
    total = int(seconds)
    if total < 60:
        return f"{total}秒"
    if total < 3600:
        return f"{total // 60}分{total % 60}秒"
    return f"{total // 3600}时{(total % 3600) // 60}分"


def generate_preview_filename(source_config, target_config, prefix="preview", include_timestamp=True):
    """Build a unique .json filename identifying a source->target ES pair.

    The name is ``{prefix}_{src_id}_to_{dst_id}`` where each endpoint id is
    host_port (plus the sanitized path, if any) with non-word characters
    replaced by underscores; optionally suffixed with a YYYYmmdd_HHMMSS
    timestamp.

    Args:
        source_config: dict with 'host', 'port' and optional 'path'.
        target_config: dict with the same shape as source_config.
        prefix: leading label for the file (e.g. "preview").
        include_timestamp: append a timestamp to make the name unique per run.

    Returns:
        A filename string ending in ".json".
    """
    import re

    def _endpoint_id(config):
        host = config.get('host', 'unknown')
        port = config.get('port', 9200)
        path = config.get('path', '')

        # host may carry a trailing path component ("host/sub"); keep host only
        if '/' in host:
            host = host.split('/')[0]

        # Sanitize IPv6 brackets, dots and other special characters.
        host = re.sub(r'[^\w\-]', '_', host)

        # If a base path is configured, fold it into the identifier.
        if path:
            path = re.sub(r'[^\w\-]', '_', path.strip('/'))
            return f"{host}_{port}_{path}"
        return f"{host}_{port}"

    filename = f"{prefix}_{_endpoint_id(source_config)}_to_{_endpoint_id(target_config)}"

    if include_timestamp:
        # BUG FIX: the computed name was previously discarded here — a literal
        # placeholder string was interpolated instead of {filename}.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"{filename}_{timestamp}"

    return f"{filename}.json"


class WebLogHandler(logging.Handler):
    """Logging handler that mirrors log records to the browser.

    Each record is appended to the module-level log queue and broadcast to
    all connected Socket.IO clients as a 'log_message' event.
    """

    def emit(self, record):
        entry = {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': record.levelname,
            'message': self.format(record),
        }
        log_queue.put(entry)
        # Push the formatted entry to the web UI in real time.
        socketio.emit('log_message', entry)


@app.route('/')
def index():
    """Render and return the single-page web UI."""
    page = render_template('index.html')
    return page


@app.route('/api/test_connection', methods=['POST'])
def test_connection():
    """Verify that both source and target ES clusters are reachable."""
    try:
        payload = request.json
        src = payload.get('source_config')
        dst = payload.get('target_config')

        # Instantiating the migrator performs the actual connectivity check;
        # it raises on failure, which the except branch reports.
        ESIndexMigrator(src, dst)

        return jsonify({
            'success': True,
            'message': '连接测试成功！'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'连接测试失败: {str(e)}'
        })


@app.route('/api/preview_migration', methods=['POST'])
def preview_migration():
    """Dry-run a migration: scan source indices and check duplicates on target.

    Streams human-readable progress to the browser over Socket.IO and returns
    a JSON summary (per-index info, duplicate counts, version/strategy data).
    No indices are created by this endpoint.
    """
    try:
        data = request.json
        source_config = data.get('source_config')
        target_config = data.get('target_config')
        
        # Attach a handler that mirrors migrator log output to the web UI.
        # NOTE(review): removed only on the success path below — the handler
        # leaks if an exception is raised mid-preview; consider try/finally.
        logger = logging.getLogger('es_index_migrator')
        web_handler = WebLogHandler()
        web_handler.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(web_handler)
        logger.setLevel(logging.INFO)
        
        # Announce the preview start in the live log panel.
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': '🔍 开始预览迁移...'
        })
        
        # Creating the migrator triggers connection and version detection.
        migrator = ESIndexMigrator(source_config, target_config)
        
        # Report detected versions and the chosen migration strategy.
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'📊 ES版本信息:'
        })
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'   源ES: {migrator.source_version}'
        })
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'   目标ES: {migrator.target_version}'
        })
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'   迁移策略: {migrator.migration_strategy}'
        })
        
        # Build a host/port-based unique filename (preview needs no timestamp).
        preview_filename = generate_preview_filename(source_config, target_config, "preview", include_timestamp=False)
        
        # Export the source index structures to the preview file.
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': '📤 正在扫描源ES索引...'
        })
        
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'📁 预览文件: {preview_filename}'
        })
        
        exported_data = migrator.export_index_structures(preview_filename)
        
        # Partition indices by whether they already exist on the target ES.
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': '🔍 检查目标ES中的重复索引...'
        })
        
        existing_indices = []
        new_indices = []
        
        for index_name in exported_data.keys():
            if migrator.target_es.indices.exists(index=index_name):
                existing_indices.append(index_name)
            else:
                new_indices.append(index_name)
        
        # Build the per-index detail list for the JSON response.
        indices_info = []
        for index_name, index_data in exported_data.items():
            # Estimate mapping complexity by counting top-level properties.
            mappings = index_data.get('mappings', {})
            properties_count = 0
            if isinstance(mappings, dict):
                if 'properties' in mappings:
                    properties_count = len(mappings['properties'])
                else:
                    # Possibly ES6-style mappings with doc-type wrappers.
                    for type_name, type_mapping in mappings.items():
                        if isinstance(type_mapping, dict) and 'properties' in type_mapping:
                            properties_count += len(type_mapping['properties'])
            
            indices_info.append({
                'name': index_name,
                'properties_count': properties_count,
                'settings_count': len(index_data.get('settings', {}).get('index', {})),
                'exists_in_target': index_name in existing_indices,
                'source_version': index_data.get('source_version', 'unknown')
            })
        
        # Emit the preview summary to the live log panel.
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': '📋 预览结果:'
        })
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'   总索引数: {len(exported_data)}'
        })
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'   新建索引: {len(new_indices)}'
        })
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'WARNING' if existing_indices else 'INFO',
            'message': f'   重复索引: {len(existing_indices)}'
        })
        
        # List duplicate indices (truncated to the first 10).
        if existing_indices:
            socketio.emit('log_message', {
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'level': 'WARNING',
                'message': '⚠️ 以下索引在目标ES中已存在:'
            })
            for idx in existing_indices[:10]:  # only show the first 10
                socketio.emit('log_message', {
                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'level': 'WARNING',
                    'message': f'   - {idx}'
                })
            if len(existing_indices) > 10:
                socketio.emit('log_message', {
                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'level': 'WARNING',
                    'message': f'   ... 还有 {len(existing_indices) - 10} 个重复索引'
                })
        
        # List the indices that would be created (truncated to the first 10).
        if new_indices:
            socketio.emit('log_message', {
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'level': 'INFO',
                'message': '✅ 将新建的索引:'
            })
            for idx in new_indices[:10]:  # only show the first 10
                socketio.emit('log_message', {
                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'level': 'INFO',
                    'message': f'   + {idx}'
                })
            if len(new_indices) > 10:
                socketio.emit('log_message', {
                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'level': 'INFO',
                    'message': f'   ... 还有 {len(new_indices) - 10} 个新索引'
                })
        
        # Summarize which structure conversions the migration would perform.
        conversion_info = []
        if migrator._needs_mapping_conversion():
            conversion_info.append("映射结构转换")
        if migrator._needs_settings_conversion():
            conversion_info.append("设置参数调整")
        
        if conversion_info:
            socketio.emit('log_message', {
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'level': 'INFO',
                'message': f'🔄 需要进行: {", ".join(conversion_info)}'
            })
        else:
            socketio.emit('log_message', {
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'level': 'INFO',
                'message': '🔄 无需结构转换，保持原有格式'
            })
        
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': '✨ 预览完成！'
        })
        
        # Detach the web log handler (success path only).
        logger.removeHandler(web_handler)
        
        return jsonify({
            'success': True,
            'indices': indices_info,
            'total_count': len(exported_data),
            'new_indices_count': len(new_indices),
            'existing_indices_count': len(existing_indices),
            'existing_indices': existing_indices,
            'migration_strategy': migrator.migration_strategy,
            'source_version': migrator.source_version,
            'target_version': migrator.target_version,
            'needs_conversion': bool(conversion_info),
            'preview_file': preview_filename
        })
        
    except Exception as e:
        socketio.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'ERROR',
            'message': f'❌ 预览失败: {str(e)}'
        })
        return jsonify({
            'success': False,
            'message': f'预览失败: {str(e)}'
        })


@app.route('/api/start_selective_migration', methods=['POST'])
def start_selective_migration():
    """Create a migration task (all indices or a chosen subset) and run it in the background."""
    try:
        payload = request.json
        src = payload.get('source_config')
        dst = payload.get('target_config')
        chosen = payload.get('selected_indices', [])
        mode = payload.get('migration_type', 'all')  # 'all' or 'selected'

        # Selected-mode requires at least one index.
        if mode == 'selected' and not chosen:
            return jsonify({
                'success': False,
                'message': '请选择要迁移的索引'
            })

        task_config = {
            'source_config': src,
            'target_config': dst,
            'indices': chosen if mode == 'selected' else [],
            'migration_type': mode
        }

        if mode == 'selected':
            description = f"迁移 {len(chosen)} 个索引"
        else:
            description = "迁移所有索引"
        task_id = task_manager.create_task('migration', description, task_config)

        # Hand the actual work off to a daemon thread so the request returns fast.
        worker = threading.Thread(
            target=run_selective_migration_task,
            args=(task_id,),
            daemon=True
        )
        worker.start()

        return jsonify({
            'success': True,
            'task_id': task_id,
            'message': '迁移任务已创建，请查看任务列表...'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'启动迁移失败: {str(e)}'
        })


@app.route('/api/start_reindex', methods=['POST'])
def start_reindex():
    """Create a reindex task for the selected indices and run it in the background."""
    try:
        payload = request.json
        src = payload.get('source_config')
        dst = payload.get('target_config')
        chosen = payload.get('selected_indices', [])
        query_config = payload.get('query_config')  # optional query filter

        # Reindex always requires an explicit index selection.
        if not chosen:
            return jsonify({
                'success': False,
                'message': '请选择要reindex的索引'
            })

        task_config = {
            'source_config': src,
            'target_config': dst,
            'indices': chosen,
            'operation': 'reindex',
            'query_config': query_config  # carried into the worker
        }

        suffix = " (带查询条件)" if query_config else ""
        description = f"Reindex {len(chosen)} 个索引" + suffix
        task_id = task_manager.create_task('reindex', description, task_config)

        # Run the reindex in a background daemon thread.
        worker = threading.Thread(
            target=run_reindex_task,
            args=(task_id,),
            daemon=True
        )
        worker.start()

        return jsonify({
            'success': True,
            'task_id': task_id,
            'message': 'Reindex任务已创建，请查看任务列表...'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'启动reindex失败: {str(e)}'
        })


@app.route('/api/tasks')
def get_tasks():
    """Return every known task, newest first."""
    try:
        # Sort by creation time, most recent task first.
        all_tasks = sorted(
            task_manager.get_all_tasks(),
            key=lambda t: t['created_at'],
            reverse=True,
        )
        return jsonify({
            'success': True,
            'tasks': all_tasks
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'获取任务列表失败: {str(e)}'
        })


@app.route('/api/task/<task_id>')
def get_task_detail(task_id):
    """Return the full record for one task, or an error payload if unknown."""
    try:
        found = task_manager.get_task(task_id)
        if found is None:
            return jsonify({
                'success': False,
                'message': '任务不存在'
            })
        return jsonify({
            'success': True,
            'task': found
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'获取任务详情失败: {str(e)}'
        })


@app.route('/api/task/<task_id>/cancel', methods=['POST'])
def cancel_task(task_id):
    """Cancel a pending/running task, cancelling any underlying ES reindex tasks.

    For reindex tasks the response's ``details`` lists which ES-side tasks
    were cancelled and which cancellations failed; for migration tasks both
    fields are None (unchanged contract).
    """
    try:
        task = task_manager.get_task(task_id)
        if not task:
            return jsonify({
                'success': False,
                'message': '任务不存在'
            })
        
        if task['status'] not in ['pending', 'running']:
            return jsonify({
                'success': False,
                'message': f'任务状态为 {task["status"]}，无法取消'
            })
        
        # BUG FIX: initialize these up-front. They were previously bound only
        # inside the branch below, so a reindex task with no registered
        # es_tasks raised NameError when building the response details.
        cancelled_tasks = []
        failed_cancellations = []
        
        # For reindex tasks, also try to cancel the server-side ES tasks.
        if task['type'] == 'reindex' and task.get('es_tasks'):
            config = task['config']
            target_es, _ = create_es_client(config['target_config'])
            
            for index_name, es_task_info in task['es_tasks'].items():
                es_task_id = es_task_info['task_id']
                try:
                    target_es.tasks.cancel(task_id=es_task_id)
                    cancelled_tasks.append(index_name)
                    socketio.emit('log_message', {
                        'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        'level': 'INFO',
                        'message': f'🛑 已取消索引 {index_name} 的reindex任务'
                    })
                except Exception as e:
                    failed_cancellations.append(f"{index_name}: {str(e)}")
        
        # Mark the task itself as cancelled.
        task_manager.update_task(
            task_id,
            status='failed',
            error='用户手动取消',
            completed_at=datetime.now().isoformat(),
            current_step='任务已取消'
        )
        
        return jsonify({
            'success': True,
            'message': '任务已取消',
            'details': {
                'cancelled_es_tasks': cancelled_tasks if task['type'] == 'reindex' else None,
                'failed_cancellations': failed_cancellations if task['type'] == 'reindex' else None
            }
        })
        
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'取消任务失败: {str(e)}'
        })


def run_selective_migration_task(task_id):
    """Background worker: export index structures from the source ES and
    create the (optionally filtered) indices on the target ES.

    Progress, per-index results and logs are reported through task_manager
    and mirrored to the web UI over Socket.IO. The task status ends up
    'completed' or 'failed'.
    """
    task = task_manager.get_task(task_id)
    if not task:
        return
    
    logger = logging.getLogger('es_index_migrator')
    web_handler = None
    try:
        task_manager.update_task(task_id, status='running', started_at=datetime.now().isoformat())
        task_manager.add_task_log(task_id, 'INFO', '开始执行选择性迁移任务')
        
        config = task['config']
        source_config = config['source_config']
        target_config = config['target_config']
        selected_indices = config.get('indices', [])
        migration_type = config.get('migration_type', 'all')
        
        task_manager.add_task_log(task_id, 'INFO', f'准备迁移 {len(selected_indices)} 个索引')
        
        # Mirror migrator log output to the web UI while this task runs.
        web_handler = WebLogHandler()
        web_handler.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(web_handler)
        logger.setLevel(logging.INFO)
        
        # Creating the migrator performs connection/version detection.
        migrator = ESIndexMigrator(source_config, target_config)
        
        task_manager.update_task(task_id, current_step='导出索引结构...', progress=10)
        
        # Export index structures to a per-run JSON file.
        migration_filename = generate_preview_filename(source_config, target_config, "selective_migration")
        exported_data = migrator.export_index_structures(migration_filename)
        
        # In selected mode keep only the chosen indices.
        if migration_type == 'selected':
            exported_data = {k: v for k, v in exported_data.items() if k in selected_indices}
            
            # Persist the filtered structure set back to the file.
            with open(migration_filename, 'w', encoding='utf-8') as f:
                json.dump(exported_data, f, indent=2, ensure_ascii=False)
        
        task_manager.update_task(task_id, current_step='创建索引...', progress=50, total_indices=len(exported_data))
        
        # Create the indices one by one, updating progress as we go.
        success_count = 0
        failed_count = 0
        processed_count = 0
        
        for index_name, index_data in exported_data.items():
            try:
                processed_count += 1
                # Index creation occupies the 50%..90% progress band.
                progress = 50 + (processed_count / len(exported_data)) * 40
                task_manager.update_task(
                    task_id,
                    current_step=f'正在创建索引: {index_name}',
                    progress=int(progress),
                    processed_indices=processed_count
                )
                
                # Skip indices that already exist on the target.
                if migrator.target_es.indices.exists(index=index_name):
                    socketio.emit('log_message', {
                        'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        'level': 'WARNING',
                        'message': f'⚠️ 索引 {index_name} 已存在，跳过创建'
                    })
                    continue
                
                # Convert mappings/settings for the target ES version.
                converted_mapping = migrator.convert_mapping(index_data['mappings'])
                converted_settings = migrator.convert_settings(index_data['settings'])
                
                # Assemble the create-index body from the non-empty parts.
                create_body = {}
                if converted_mapping:
                    create_body['mappings'] = converted_mapping
                if converted_settings:
                    create_body['settings'] = converted_settings
                
                migrator.target_es.indices.create(index=index_name, body=create_body)
                success_count += 1
                
                task_manager.add_task_log(task_id, 'INFO', f'✅ 索引 {index_name} 创建成功')
                task_manager.update_index_result(task_id, index_name, 'success', '创建成功')
                
                socketio.emit('log_message', {
                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'level': 'INFO',
                    'message': f'✅ 索引 {index_name} 创建成功'
                })
                
            except Exception as e:
                failed_count += 1
                error_msg = str(e)
                task_manager.add_task_log(task_id, 'ERROR', f'❌ 索引 {index_name} 创建失败: {error_msg}')
                task_manager.update_index_result(task_id, index_name, 'failed', error_msg)
                
                socketio.emit('log_message', {
                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'level': 'ERROR',
                    'message': f'❌ 索引 {index_name} 创建失败: {error_msg}'
                })
        
        results = {
            'total_indices': len(exported_data),
            'success_count': success_count,
            'failed_count': failed_count,
            'migration_file': migration_filename
        }
        
        task_manager.add_task_log(task_id, 'INFO', f'🎉 迁移任务完成！成功: {success_count}, 失败: {failed_count}')
        
        task_manager.update_task(
            task_id,
            status='completed',
            progress=100,
            current_step='迁移完成',
            completed_at=datetime.now().isoformat(),
            results=results
        )
        
    except Exception as e:
        task_manager.update_task(
            task_id,
            status='failed',
            error=str(e),
            completed_at=datetime.now().isoformat()
        )
    finally:
        # BUG FIX: always detach the web handler. Previously it was removed
        # only on the success path, so a failing task left it attached to
        # the shared logger (duplicated emits on every subsequent run).
        if web_handler is not None:
            logger.removeHandler(web_handler)

def run_reindex_task(task_id):
    """Execute a reindex task: submit one async remote reindex per selected index.

    Looks up the task by *task_id*, connects to the source and target
    clusters, submits a ``_reindex`` request (``wait_for_completion=False``)
    on the target cluster for each index, records the returned ES task ids,
    and finally starts the completion monitor thread.

    Per-index errors are recorded on the task and processing continues with
    the next index; an error before the loop (bad query JSON, connection
    failure) marks the whole task as failed.
    """
    task = task_manager.get_task(task_id)
    if not task:
        return

    try:
        config = task['config']
        source_config = config['source_config']
        target_config = config['target_config']
        selected_indices = config.get('indices', [])

        # Parse the optional query condition ONCE up front.  The original
        # re-parsed the JSON string for every index inside the loop.
        query_config = config.get('query_config')
        if isinstance(query_config, str):
            try:
                query_config = json.loads(query_config)
            except json.JSONDecodeError:
                task_manager.add_task_log(task_id, 'ERROR', f'❌ 查询条件JSON格式无效: {query_config}')
                raise ValueError(f"查询条件JSON格式无效: {query_config}")

        # Mark the task as running.
        task_manager.update_task(
            task_id,
            status='running',
            started_at=datetime.now().isoformat(),
            current_step='初始化连接',
            progress=0
        )

        # Create the ES clients (version strings are not needed here).
        source_es, _ = create_es_client(source_config)
        target_es, _ = create_es_client(target_config)

        def build_reindex_body(index_name):
            """Build the ``_reindex`` request body for one index (remote source)."""
            # Full URL of the source cluster, including any base path.
            source_url = f"{source_config['scheme']}://{source_config['host']}:{source_config['port']}"
            if source_config.get('path'):
                source_url = f"{source_url.rstrip('/')}/{source_config['path'].strip('/')}"

            # Only include credentials when both username and password are set.
            remote = {"host": source_url}
            if source_config.get('username') and source_config.get('password'):
                remote["username"] = source_config['username']
                remote["password"] = source_config['password']

            reindex_body = {
                "source": {
                    "remote": remote,
                    "index": index_name
                },
                "dest": {
                    "index": index_name
                },
                "conflicts": "proceed"  # keep going on version conflicts
            }

            # Attach the (already parsed) query condition, if any.  Accept
            # both {"query": {...}} wrappers and bare query objects.
            if isinstance(query_config, dict):
                query = query_config['query'] if 'query' in query_config else query_config
                reindex_body["source"]["query"] = query
                task_manager.add_task_log(task_id, 'INFO', f'✅ 添加查询条件: {json.dumps(query, ensure_ascii=False)}')

            return reindex_body

        total_indices = len(selected_indices)

        # Guard against an empty selection: the original divided by
        # total_indices and raised ZeroDivisionError here.
        if total_indices == 0:
            task_manager.update_task(
                task_id,
                status='completed',
                progress=100,
                current_step='没有需要处理的索引',
                completed_at=datetime.now().isoformat(),
                results={'total_indices': 0, 'success_count': 0, 'failed_count': 0}
            )
            return

        processed_indices = 0

        for index_name in selected_indices:
            try:
                # Publish which index we are on and the overall progress.
                current_progress = int((processed_indices / total_indices) * 100)
                task_manager.update_task(
                    task_id,
                    current_step=f'正在处理索引: {index_name} ({processed_indices + 1}/{total_indices})',
                    processed_indices=processed_indices,
                    progress=current_progress
                )

                # Fetch the source index document count for progress tracking.
                try:
                    index_stats = source_es.indices.stats(index=index_name)
                    total_docs = index_stats['indices'][index_name]['primaries']['docs']['count']
                except Exception as e:
                    error_msg = f'无法获取索引 {index_name} 的统计信息: {str(e)}'
                    task_manager.add_task_log(task_id, 'ERROR', error_msg)
                    task_manager.update_index_result(task_id, index_name, 'failed', error_msg)
                    processed_indices += 1
                    continue

                # Seed the detailed progress record for this index.
                task_manager.update_detailed_progress(task_id, index_name, {
                    'total_docs': total_docs,
                    'docs_processed': 0,
                    'rate': 0,
                    'eta': 0,
                    'eta_formatted': '计算中...',
                    'created': 0,
                    'updated': 0,
                    'deleted': 0,
                    'version_conflicts': 0
                })

                # Submit the reindex asynchronously; the monitor thread polls it.
                reindex_body = build_reindex_body(index_name)
                response = target_es.reindex(body=reindex_body, wait_for_completion=False)

                # Remember the ES task id so the monitor can track it.
                task_manager.update_es_task(task_id, index_name, response['task'])
                task_manager.add_task_log(
                    task_id,
                    'INFO',
                    f'开始reindex索引 {index_name}, ES任务ID: {response["task"]}'
                )

            except Exception as e:
                error_msg = f'处理索引 {index_name} 时发生错误: {str(e)}'
                task_manager.add_task_log(task_id, 'ERROR', error_msg)
                task_manager.update_index_result(
                    task_id,
                    index_name,
                    'failed',
                    error_msg
                )

            processed_indices += 1

        # Hand off to the background monitor that waits for completion.
        monitor_reindex_completion(task_id)

    except Exception as e:
        error_msg = f'执行reindex任务时发生错误: {str(e)}'
        task_manager.update_task(
            task_id,
            status='failed',
            error=error_msg,
            completed_at=datetime.now().isoformat()
        )
def monitor_reindex_completion(task_id):
    """Poll ES until every reindex sub-task of *task_id* has finished.

    Starts a daemon thread that, every 5 seconds, queries the target
    cluster's tasks API for each outstanding index, publishes per-index
    progress, records each index's final result exactly once, and marks the
    whole task completed (or failed) when every index has settled.
    """
    poll_interval = 5
    max_monitor_errors = 3  # consecutive polling failures before an index is given up on

    def monitor():
        # Clients are created lazily ONCE and reused across polls; the
        # original rebuilt both connections on every 5-second iteration.
        target_es = None
        source_es = None
        outcomes = {}       # index_name -> 'success' | 'failed', settled exactly once
        error_streaks = {}  # index_name -> consecutive monitoring errors

        while True:
            try:
                task = task_manager.get_task(task_id)
                if not task:
                    break

                # Stop when cancelled or no longer running.
                if task['status'] == 'cancelled':
                    task_manager.add_task_log(task_id, 'INFO', '任务已被用户取消')
                    break
                if task['status'] != 'running':
                    break

                if target_es is None:
                    config = task['config']
                    target_es, _ = create_es_client(config['target_config'])
                    source_es, _ = create_es_client({**config['source_config'], 'is_remote': True})

                all_completed = True

                for index_name, es_task_info in task['es_tasks'].items():
                    if index_name in outcomes:
                        # Already settled — don't re-log its result every poll
                        # (the original re-emitted success logs each iteration).
                        continue

                    es_task_id = es_task_info['task_id']

                    try:
                        response = target_es.tasks.get(task_id=es_task_id)

                        # The tasks API reports reindex failures in an 'error' field.
                        if 'error' in response:
                            error_msg = json.dumps(response['error'], ensure_ascii=False)
                            task_manager.add_task_log(task_id, 'ERROR', f'❌ 索引 {index_name} reindex失败: {error_msg}')
                            task_manager.update_index_result(task_id, index_name, 'failed', error_msg)
                            outcomes[index_name] = 'failed'
                            continue

                        if not response.get('completed', False):
                            all_completed = False
                            status = response.get('task', {}).get('status', {})
                            if status:
                                total = status.get('total', 0)
                                created = status.get('created', 0)
                                updated = status.get('updated', 0)
                                deleted = status.get('deleted', 0)
                                version_conflicts = status.get('version_conflicts', 0)

                                processed = created + updated + deleted
                                progress_percent = (processed / total) * 100 if total > 0 else 0

                                task_manager.update_detailed_progress(task_id, index_name, {
                                    'total_docs': total,
                                    'docs_processed': processed,
                                    'progress_percent': progress_percent,
                                    'created': created,
                                    'updated': updated,
                                    'deleted': deleted,
                                    'version_conflicts': version_conflicts
                                })

                                task_manager.add_task_log(task_id, 'INFO',
                                    f'📊 索引 {index_name} 进度: '
                                    f'总数={total}, '
                                    f'已处理={processed} ({progress_percent:.1f}%), '
                                    f'已创建={created}, '
                                    f'已更新={updated}, '
                                    f'已删除={deleted}, '
                                    f'版本冲突={version_conflicts}'
                                )
                        else:
                            # ES task finished — inspect the final counters.
                            status = response.get('task', {}).get('status', {})

                            total_docs = status.get('total', 0)
                            created = status.get('created', 0)
                            updated = status.get('updated', 0)
                            deleted = status.get('deleted', 0)
                            version_conflicts = status.get('version_conflicts', 0)
                            noops = status.get('noops', 0)

                            processed_docs = created + updated + deleted

                            # Sanity check: a non-empty source with zero migrated
                            # documents means the reindex silently did nothing.
                            try:
                                source_count = source_es.count(index=index_name)['count']
                                if source_count > 0 and processed_docs == 0:
                                    error_msg = f"源索引有{source_count}个文档，但没有文档被迁移"
                                    task_manager.add_task_log(task_id, 'ERROR', f'❌ 索引 {index_name} reindex异常: {error_msg}')
                                    task_manager.update_index_result(task_id, index_name, 'failed', error_msg)
                                    outcomes[index_name] = 'failed'
                                    continue
                            except Exception as e:
                                task_manager.add_task_log(task_id, 'WARNING', f'⚠️ 无法获取源索引 {index_name} 文档数量: {str(e)}')

                            success_msg = (
                                f"✅ 索引 {index_name} reindex完成: "
                                f"总数={total_docs}, "
                                f"创建={created}, "
                                f"更新={updated}, "
                                f"删除={deleted}, "
                                f"跳过={noops}, "
                                f"冲突={version_conflicts}"
                            )
                            task_manager.add_task_log(task_id, 'INFO', success_msg)
                            task_manager.update_index_result(
                                task_id,
                                index_name,
                                'success',
                                success_msg,
                                docs_count=processed_docs,
                                conflicts=version_conflicts
                            )
                            outcomes[index_name] = 'success'

                        error_streaks[index_name] = 0

                    except Exception as e:
                        # A transient polling error should not instantly fail
                        # the index (the original did); retry a few polls first.
                        error_streaks[index_name] = error_streaks.get(index_name, 0) + 1
                        error_msg = f'监控索引 {index_name} reindex进度时发生错误: {str(e)}'
                        task_manager.add_task_log(task_id, 'ERROR', error_msg)
                        if error_streaks[index_name] >= max_monitor_errors:
                            task_manager.update_index_result(task_id, index_name, 'failed', error_msg)
                            outcomes[index_name] = 'failed'
                        else:
                            all_completed = False

                total_success = sum(1 for v in outcomes.values() if v == 'success')
                total_failed = sum(1 for v in outcomes.values() if v == 'failed')

                # Publish overall progress.
                total_indices = len(task['es_tasks'])
                completed_indices = total_success + total_failed
                overall_progress = int((completed_indices / total_indices) * 100) if total_indices > 0 else 0

                task_manager.update_task(
                    task_id,
                    progress=overall_progress,
                    processed_indices=completed_indices
                )

                # All indices settled -> finalize the task and stop polling.
                if all_completed:
                    final_status = 'completed' if total_failed == 0 else 'failed'
                    task_manager.update_task(
                        task_id,
                        status=final_status,
                        completed_at=datetime.now().isoformat(),
                        results={
                            'total_indices': total_indices,
                            'success_count': total_success,
                            'failed_count': total_failed
                        }
                    )
                    break

                time.sleep(poll_interval)

            except Exception as e:
                task_manager.add_task_log(task_id, 'ERROR', f'监控任务时发生错误: {str(e)}')
                # Force a client rebuild in case the connection itself broke.
                target_es = None
                source_es = None
                time.sleep(poll_interval)

    # Run the monitor as a daemon so it never blocks interpreter shutdown.
    thread = threading.Thread(target=monitor, daemon=True)
    thread.start()


def create_es_client(config):
    """Create an Elasticsearch client from *config* and detect the server version.

    *config* is a dict holding either a full URL in ``host`` or separate
    ``scheme``/``host``/``port`` fields, plus optional ``path``,
    ``username``/``password``, timeout, SSL, and certificate options.

    Returns a ``(client, version_string)`` tuple.

    Raises ValueError for invalid input, and a generic Exception (with the
    original cause chained via ``from``) for connection failures.
    """
    try:
        # Basic input validation.
        if not isinstance(config, dict):
            raise ValueError("配置必须是字典类型")

        # Case 1: 'host' already contains a full URL — use it as-is.
        host = config.get('host', '')
        if '://' in host:
            url = host.rstrip('/')
            if config.get('username') and config.get('password'):
                # NOTE(review): embedding credentials in the URL means they can
                # leak into logs/exception messages — consider http_auth instead.
                scheme, rest = url.split('://', 1)
                url = f"{scheme}://{config['username']}:{config['password']}@{rest}"

            # Append an extra base path, if configured.
            if config.get('path'):
                path = str(config['path']).strip('/')
                if path:
                    url = f"{url}/{path}"
        else:
            # Case 2: build the URL from separate fields.
            required_params = ['scheme', 'host']
            for param in required_params:
                if param not in config:
                    raise ValueError(f"缺少必要参数: {param}")

            url = f"{config['scheme']}://"
            if config.get('username') and config.get('password'):
                url += f"{config['username']}:{config['password']}@"

            # Bracket bare IPv6 addresses so the port separator is unambiguous.
            if ':' in host and not host.startswith('['):
                host = f"[{host}]"

            # Coerce the port to int; fall back to scheme defaults on bad input.
            port = config.get('port')
            if port is not None:
                try:
                    port = int(port)
                except (TypeError, ValueError):
                    port = None

            if not port:
                port = 443 if config['scheme'] == 'https' else 9200

            url += f"{host}:{port}"

            if config.get('path'):
                path = str(config['path']).strip('/')
                if path:
                    url = f"{url}/{path}"

        # Conservative client defaults; overridable via *config* below.
        default_params = {
            'request_timeout': 60,
            'max_retries': 3,
            'retry_on_timeout': True,
            'verify_certs': False,
            'ssl_show_warn': False,
            'sniff_on_start': False,
            'sniff_on_connection_fail': False,
            'sniffer_timeout': 60,
            'http_compress': True
        }

        es_params = default_params.copy()

        # Integer overrides — only accepted when positive and parseable.
        for param in ['request_timeout', 'max_retries', 'sniffer_timeout']:
            if param in config:
                try:
                    value = int(config[param])
                    if value > 0:
                        es_params[param] = value
                except (TypeError, ValueError):
                    pass  # keep the default

        # Boolean overrides.
        for param in ['retry_on_timeout', 'verify_certs', 'ssl_show_warn', 'sniff_on_start', 'sniff_on_connection_fail', 'http_compress']:
            if param in config:
                es_params[param] = bool(config[param])

        es_params['hosts'] = [url]

        # Custom headers, always tagged with the tool's identity.
        headers = config.get('headers', {})
        if not isinstance(headers, dict):
            headers = {}
        headers.update({
            'User-Agent': 'ES-Migration-Tool/1.0',
            'X-Opaque-Id': 'migration_tool'
        })
        es_params['headers'] = headers

        # Relaxed SSL defaults for https endpoints; config values win.
        if url.startswith('https://'):
            ssl_params = {
                'ssl_assert_hostname': False,
                'ssl_assert_fingerprint': None,
                'ssl_version': None,
                'ssl_context': None
            }
            for param, default in ssl_params.items():
                if param in config and config[param] is not None:
                    ssl_params[param] = config[param]
            es_params.update(ssl_params)

        # Client-side certificates, when provided.
        cert_params = ['ca_certs', 'client_cert', 'client_key']
        for param in cert_params:
            if config.get(param):
                es_params[param] = str(config[param])

        # Create the client and verify the connection.
        es = Elasticsearch(**es_params)

        try:
            info = es.info()
            version = info['version']['number']
            return es, version
        except Exception as e:
            # info() failed — if ping() works the base path is likely wrong,
            # so retry once against the URL without the extra path.
            if es.ping():
                if config.get('path'):
                    base_url = url.split('/', 3)[:3]  # ['scheme:', '', 'host:port']
                    es_params['hosts'] = ['/'.join(base_url)]
                    es = Elasticsearch(**es_params)
                    info = es.info()
                    version = info['version']['number']
                    return es, version
            raise Exception(f"无法获取ES版本信息: {str(e)}") from e

    except Exception as e:
        if isinstance(e, ValueError):
            raise
        # Chain the cause so the original traceback is not lost.
        raise Exception(f"创建ES客户端失败: {str(e)}") from e


def run_migration_task(source_config, target_config, socketio_instance):
    """Run an index-structure migration in a background thread.

    Exports index structures from the source cluster to a JSON file, then
    recreates them on the target cluster.  Progress and results are written
    to the module-level ``migration_status`` dict; log lines are streamed to
    the browser via *socketio_instance* and a temporary logging handler.
    """
    global migration_status

    # Prepare the web log handler up front so the finally block can always
    # detach it.  The original removed it only on success, so every failed
    # run leaked a handler and duplicated log lines on subsequent runs.
    logger = logging.getLogger('es_index_migrator')
    web_handler = WebLogHandler()
    web_handler.setFormatter(logging.Formatter('%(message)s'))

    try:
        migration_status['running'] = True
        migration_status['current_step'] = '初始化迁移工具...'

        logger.addHandler(web_handler)
        logger.setLevel(logging.INFO)

        # Unique migration file name derived from the source/target endpoints.
        migration_filename = generate_preview_filename(source_config, target_config, "migration")

        # Tell the browser which file this run writes to (use the passed-in
        # socketio instance instead of silently ignoring the parameter).
        socketio_instance.emit('log_message', {
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'level': 'INFO',
            'message': f'📁 迁移文件: {migration_filename}'
        })

        # Connect to both clusters and build the migrator.
        source_es, source_version = create_es_client(source_config)
        target_es, target_version = create_es_client(target_config)
        migrator = ESIndexMigrator(source_config, target_config, source_es=source_es, target_es=target_es)

        migration_status['current_step'] = '导出索引结构...'
        exported_data = migrator.export_index_structures(migration_filename)

        migration_status['current_step'] = '在目标ES中创建索引...'
        import_results = migrator.import_index_structures(migration_filename)

        migration_status['current_step'] = '迁移完成'
        migration_status['results'] = {
            'exported_count': len(exported_data),
            'import_results': import_results,
            'migration_file': migration_filename
        }
        migration_status['running'] = False

    except Exception as e:
        migration_status['running'] = False
        migration_status['current_step'] = f'迁移失败: {str(e)}'
    finally:
        # Always detach the handler, even when the migration failed.
        logger.removeHandler(web_handler)


@app.route('/api/start_migration', methods=['POST'])
def start_migration():
    """Kick off an index-structure migration in a background thread.

    Rejects the request if a migration is already running or if the JSON
    body lacks the source/target ES configs; otherwise spawns a daemon
    worker and returns immediately.
    """
    global migration_status

    if migration_status['running']:
        return jsonify({
            'success': False,
            'message': '迁移正在进行中，请稍候...'
        })

    try:
        # get_json(silent=True) avoids an exception on a missing/invalid body.
        data = request.get_json(silent=True) or {}
        source_config = data.get('source_config')
        target_config = data.get('target_config')

        # Fail fast here instead of inside the (invisible) worker thread.
        if not source_config or not target_config:
            return jsonify({
                'success': False,
                'message': '缺少源或目标ES配置'
            })

        # Run the migration in the background so the request returns quickly.
        thread = threading.Thread(
            target=run_migration_task,
            args=(source_config, target_config, socketio),
            daemon=True
        )
        thread.start()

        return jsonify({
            'success': True,
            'message': '迁移已开始，请查看实时日志...'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'启动迁移失败: {str(e)}'
        })


@app.route('/api/migration_status')
def get_migration_status():
    """Return the module-level ``migration_status`` dict as JSON.

    Exposes ``running``, ``current_step`` and ``results`` for front-end polling.
    """
    return jsonify(migration_status)


@app.route('/api/get_index_structure', methods=['POST'])
def get_index_structure():
    """Return mappings, settings and a sample document for one source index.

    Expects a JSON body with ``source_config`` and ``index_name``.  Responds
    with ``{'success': False, 'message': ...}`` on any error instead of an
    HTTP error status.
    """
    try:
        # silent=True: a missing/invalid JSON body becomes {} instead of
        # raising (request.json would be None and crash on .get()).
        data = request.get_json(silent=True) or {}
        source_config = data.get('source_config')
        index_name = data.get('index_name')

        if not index_name:
            return jsonify({
                'success': False,
                'message': '请提供索引名称'
            })

        # Connect to the source cluster (raises on bad/missing config).
        source_es, _ = create_es_client(source_config)

        try:
            # Mappings and settings for the requested index.
            index_info = source_es.indices.get(index=index_name)
            mappings = index_info[index_name].get('mappings', {})
            settings = index_info[index_name].get('settings', {})

            # Best-effort fetch of one sample document.
            # NOTE(review): sorting on _id is disallowed on newer ES versions
            # (fielddata on _id is disabled by default) — the except branch
            # covers that case; confirm against the target ES version.
            latest_doc = None
            try:
                result = source_es.search(
                    index=index_name,
                    body={
                        "size": 1,
                        "sort": [{"_id": "desc"}],
                        "query": {"match_all": {}}
                    }
                )
                if result['hits']['hits']:
                    latest_doc = result['hits']['hits'][0]['_source']
            except Exception as e:
                app.logger.error(f"获取最新数据失败: {str(e)}")
                latest_doc = {"error": "获取数据失败"}

            structure = {
                'mappings': mappings,
                'settings': settings,
                'latest_document': latest_doc
            }

            return jsonify({
                'success': True,
                'structure': structure
            })

        except NotFoundError:
            return jsonify({
                'success': False,
                'message': f'索引 {index_name} 不存在'
            })

    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'获取索引结构失败: {str(e)}'
        })


@socketio.on('connect')
def handle_connect():
    """Acknowledge a newly connected WebSocket client."""
    greeting = {'message': '连接成功'}
    emit('connected', greeting)


@socketio.on('disconnect')
def handle_disconnect():
    """Log to stdout that a WebSocket client disconnected."""
    print('客户端断开连接')


def find_available_port(start_port=5000):
    """Return the first free TCP port in ``[start_port, start_port + 100)``.

    Binds to all interfaces ('') rather than 'localhost' so the check matches
    the server, which listens on 0.0.0.0 — a port busy on another interface
    would otherwise be reported as free.  Returns None when no port in the
    range is available.

    Note: inherently racy (TOCTOU) — another process may grab the port
    between this check and the server's own bind.
    """
    import socket
    for port in range(start_port, start_port + 100):
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind(('', port))
                return port
        except OSError:
            continue
    return None

if __name__ == '__main__':
    # Ensure the Flask templates directory exists before serving pages
    # ('os' is already imported at the top of the file).
    os.makedirs('templates', exist_ok=True)

    # Start the background ES task monitor thread.
    start_es_task_monitor()
    print("🔄 ES任务监控已启动")

    # Start searching at 5001: port 5000 is often taken by the system.
    port = find_available_port(5001)
    if port is None:
        print("❌ 无法找到可用端口")
        # raise SystemExit instead of the site builtin exit(), which may be absent.
        raise SystemExit(1)

    print("🚀 启动ES迁移工具Web界面...")
    print(f"📱 访问地址: http://localhost:{port}")

    try:
        socketio.run(app, debug=False, host='0.0.0.0', port=port)
    except KeyboardInterrupt:
        print("\n👋 服务已停止")
    except Exception as e:
        print(f"❌ 启动失败: {e}")