#!/usr/bin/env python3
"""
ES Archive 高级场景示例

演示复杂的使用场景，包括大数据集处理、性能优化、自动化等。
"""

import os
import sys
import time
import threading
from datetime import datetime, timezone, timedelta
from pathlib import Path

# 添加项目路径
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

from es_archive.core.backup import BackupManager, BackupStrategy
from es_archive.core.restore import RestoreManager, RestoreStrategy
from es_archive.core.storage import CompressionManager
from es_archive.monitoring.progress import ProgressMonitor
from es_archive.monitoring.metrics import MetricsCollector
from es_archive.monitoring.alerts import AlertManager, AlertLevel, AlertChannel
from es_archive.utils.es_client import ESClient


def setup_es_client():
    """Build an ``ESClient`` from environment-variable configuration.

    Reads ``ES_HOST``, ``ES_USERNAME`` and ``ES_PASSWORD`` (falling back
    to local-development defaults) and returns a client configured with
    a 60-second timeout.
    """
    host = os.getenv('ES_HOST', 'localhost:9200')
    user = os.getenv('ES_USERNAME', 'elastic')
    pwd = os.getenv('ES_PASSWORD', 'password')
    return ESClient({
        'hosts': [host],
        'username': user,
        'password': pwd,
        'timeout': 60,
    })


def scenario_large_dataset_backup():
    """Scenario: high-throughput backup of a large dataset.

    Tunes the backup for speed (large batches, more workers, fast LZ4
    compression, validation disabled), runs it under progress and
    metrics monitoring, then reports duration, throughput and average
    resource usage.
    """
    print("=== 大数据集备份优化场景 ===")

    es_client = setup_es_client()

    # Single source of truth for the tuning knobs, so the manager config
    # and the strategy cannot drift apart.
    batch_size = 5000        # larger bulk size
    parallel_workers = 8     # more parallel workers
    compression = 'lz4'      # fast compression algorithm
    validate_data = False    # skip validation for speed

    backup_config = {
        'batch_size': batch_size,
        'parallel_workers': parallel_workers,
        'compression': compression,
        'validate_data': validate_data
    }

    backup_manager = BackupManager(es_client, backup_config)

    # High-performance backup strategy mirroring the config above.
    strategy = BackupStrategy(
        strategy_type="full",
        batch_size=batch_size,
        parallel_workers=parallel_workers,
        compression=compression,
        validate_data=validate_data
    )

    # Run with progress and metrics monitoring enabled.
    with ProgressMonitor() as monitor:
        with MetricsCollector() as collector:

            def progress_callback(info):
                """Print overall progress and surface any detected bottlenecks."""
                summary = info.get('summary', {})

                print(f"  总体进度: {summary.get('overall_percentage', 0):.1f}%")
                print(f"  活跃任务: {summary.get('running_tasks', 0)}")

                # Check for performance bottlenecks during the run.
                bottlenecks = collector.detect_bottlenecks()
                if bottlenecks:
                    for bottleneck in bottlenecks:
                        print(f"  ⚠ 性能瓶颈: {bottleneck['message']}")

            monitor.add_callback(progress_callback)

            try:
                start_time = time.time()

                backup_metadata = backup_manager.backup_index(
                    index="large_index",
                    output_dir="./backups/large",
                    strategy=strategy
                )

                duration = time.time() - start_time

                # Guard the throughput division: a trivially small index can
                # finish in ~0 seconds and would raise ZeroDivisionError.
                docs = backup_metadata.backed_up_documents
                throughput = docs / duration if duration > 0 else 0.0

                print(f"✓ 大数据集备份完成")
                print(f"  耗时: {duration:.2f} 秒")
                print(f"  文档数: {docs:,}")
                print(f"  吞吐量: {throughput:.2f} docs/sec")

                # Summarize resource usage over the run's duration.
                metrics_summary = collector.get_metrics_summary(minutes=int(duration / 60) + 1)
                print(f"  平均CPU: {metrics_summary.get('cpu', {}).get('avg', 0):.1f}%")
                print(f"  平均内存: {metrics_summary.get('memory', {}).get('avg', 0):.1f}%")

            except Exception as e:
                print(f"✗ 大数据集备份失败: {e}")


def scenario_compression_optimization():
    """Scenario: benchmark compression algorithms and recommend one.

    Compresses and decompresses a fixed test payload with each
    available algorithm, verifies the round trip restores the payload,
    and reports compression ratio plus timing for each.
    """
    print("\n=== 压缩算法优化场景 ===")

    # Synthetic, highly compressible payload (~35 KB).
    test_data = b"This is test data for compression. " * 1000

    compression_manager = CompressionManager()

    # Algorithms to benchmark against each other.
    algorithms = ['none', 'gzip', 'lz4']
    results = {}

    print("压缩算法性能对比:")
    for algorithm in algorithms:
        try:
            start_time = time.time()
            compressed_data, info = compression_manager.compress_data(
                test_data, algorithm=algorithm, level=6
            )
            compression_time = time.time() - start_time

            start_time = time.time()
            decompressed_data, _ = compression_manager.decompress_data(
                compressed_data, algorithm=algorithm
            )
            decompression_time = time.time() - start_time

            # Verify the round trip actually restored the payload; a
            # silent mismatch would make the benchmark numbers meaningless.
            if decompressed_data != test_data:
                raise ValueError("round-trip data mismatch")

            results[algorithm] = {
                'compression_ratio': info['compression_ratio'],
                'compression_time': compression_time,
                'decompression_time': decompression_time,
                'total_time': compression_time + decompression_time
            }

            print(f"  {algorithm}:")
            print(f"    压缩率: {info['compression_ratio']:.2%}")
            print(f"    压缩时间: {compression_time:.3f}s")
            print(f"    解压时间: {decompression_time:.3f}s")
            print(f"    总时间: {compression_time + decompression_time:.3f}s")

        except Exception as e:
            print(f"  {algorithm}: 失败 - {e}")

    # Recommend the best algorithms. NOTE(review): using min() assumes
    # compression_ratio is compressed/original size (smaller is better) —
    # confirm against CompressionManager's definition.
    if results:
        best_compression = min(results.items(), key=lambda x: x[1]['compression_ratio'])
        best_speed = min(results.items(), key=lambda x: x[1]['total_time'])

        print(f"\n推荐:")
        print(f"  最佳压缩率: {best_compression[0]} ({best_compression[1]['compression_ratio']:.2%})")
        print(f"  最快速度: {best_speed[0]} ({best_speed[1]['total_time']:.3f}s)")


def scenario_automated_backup_with_alerts():
    """Scenario: automated incremental backup wired to the alert system.

    Registers a success rule and a slow-backup rule, runs a one-hour
    incremental backup job, feeds the outcome to the alert manager,
    and prints the recent alert history.
    """
    print("\n=== 自动化备份与告警场景 ===")

    # Alerting configuration (email channel shown for illustration).
    alert_config = {
        'email': {
            'smtp_server': 'smtp.example.com',
            'username': 'alerts@example.com',
            'password': 'password',
            'to_addrs': ['admin@example.com']
        }
    }

    alert_manager = AlertManager(alert_config)

    # Only AlertRule needs importing here; AlertLevel and AlertChannel are
    # already available from the module-level import (re-importing them
    # was redundant).
    from es_archive.monitoring.alerts import AlertRule

    # Informational alert for successful backups.
    success_rule = AlertRule(
        name="backup_success",
        condition=lambda ctx: ctx.get('backup_status') == 'completed',
        level=AlertLevel.INFO,
        channels=[AlertChannel.LOG],
        message_template="备份成功完成: {backup_id}, 文档数: {document_count}",
        cooldown_seconds=0  # success notifications need no cooldown
    )
    alert_manager.add_rule(success_rule)

    # Warn when a backup takes longer than one hour.
    slow_backup_rule = AlertRule(
        name="slow_backup",
        condition=lambda ctx: ctx.get('backup_duration', 0) > 3600,  # over 1 hour
        level=AlertLevel.WARNING,
        channels=[AlertChannel.LOG],
        message_template="备份耗时过长: {backup_duration:.2f}秒",
        cooldown_seconds=1800
    )
    alert_manager.add_rule(slow_backup_rule)

    # Simulated automated backup flow.
    es_client = setup_es_client()
    backup_manager = BackupManager(es_client, {})

    def automated_backup_job():
        """Run one incremental backup and route its outcome to the alert rules."""
        try:
            start_time = time.time()

            # Incremental strategy covering the last hour of data.
            strategy = BackupStrategy(
                strategy_type="incremental",
                time_field="@timestamp",
                time_range_start=datetime.now(timezone.utc) - timedelta(hours=1),
                time_range_end=datetime.now(timezone.utc)
            )

            backup_metadata = backup_manager.backup_index(
                index="logs_index",
                output_dir="./backups/automated",
                strategy=strategy
            )

            duration = time.time() - start_time

            # Feed the result into the alert rules registered above.
            context = {
                'backup_status': backup_metadata.status,
                'backup_id': backup_metadata.backup_id,
                'document_count': backup_metadata.backed_up_documents,
                'backup_duration': duration
            }

            alert_manager.check_alerts(context)

            print(f"✓ 自动化备份完成: {backup_metadata.backup_id}")

        except Exception as e:
            # Trigger the failure path of the alert rules.
            context = {
                'backup_status': 'failed',
                'error_message': str(e)
            }
            alert_manager.check_alerts(context)
            print(f"✗ 自动化备份失败: {e}")

    # Execute the automated backup once.
    automated_backup_job()

    # Show what alerts fired in the last hour.
    alert_history = alert_manager.get_alert_history(hours=1)
    print(f"告警历史: {len(alert_history)} 条")
    for alert in alert_history:
        print(f"  [{alert.level.value}] {alert.message}")


def scenario_disaster_recovery():
    """Scenario: disaster-recovery drill.

    Creates a full backup of ``critical_data``, simulates a disaster
    (without actually deleting anything), restores into a separate
    index, reports RTO/RPO figures, and validates data integrity.
    Each step prints its own result; later steps still run even if an
    earlier one fails (only a failed backup aborts the drill).
    """
    print("\n=== 灾难恢复演练场景 ===")
    
    es_client = setup_es_client()
    
    # 1. Create the test backup; without it the rest of the drill is
    # meaningless, so a failure here returns early.
    print("1. 创建测试备份...")
    backup_manager = BackupManager(es_client, {})
    strategy = BackupStrategy(strategy_type="full")
    
    try:
        backup_metadata = backup_manager.backup_index(
            index="critical_data",
            output_dir="./backups/disaster_recovery",
            strategy=strategy
        )
        backup_id = backup_metadata.backup_id
        print(f"   ✓ 备份创建完成: {backup_id}")
        
    except Exception as e:
        print(f"   ✗ 备份创建失败: {e}")
        return
    
    # 2. Simulate the disaster (index loss).
    print("2. 模拟灾难场景...")
    try:
        # NOTE: demonstration only — no index is actually deleted here.
        # Be extremely careful with destructive operations on real clusters.
        print("   ⚠ 模拟索引损坏（实际不执行删除）")
        
    except Exception as e:
        print(f"   模拟灾难失败: {e}")
    
    # 3. Perform the disaster recovery.
    print("3. 执行灾难恢复...")
    restore_manager = RestoreManager(es_client, {})
    
    # Restore into a fresh index so the drill never touches the original.
    restore_strategy = RestoreStrategy(
        strategy_type="full",
        target_index="critical_data_recovered",
        create_index=True,
        validate_data=True
    )
    
    try:
        start_time = time.time()
        
        restore_metadata = restore_manager.restore_from_backup(
            backup_id=backup_id,
            strategy=restore_strategy
        )
        
        end_time = time.time()
        recovery_time = end_time - start_time
        
        print(f"   ✓ 灾难恢复完成")
        print(f"   恢复时间: {recovery_time:.2f} 秒")
        print(f"   恢复文档: {restore_metadata.restored_documents:,}")
        print(f"   RTO (恢复时间目标): {recovery_time:.2f}s")
        
        # RPO (recovery point objective): time elapsed since the backup
        # started. NOTE(review): assumes backup_metadata.start_time is a
        # timezone-aware UTC datetime — subtracting a naive datetime from
        # an aware one raises TypeError; confirm against BackupManager.
        backup_time = backup_metadata.start_time
        current_time = datetime.now(timezone.utc)
        rpo = (current_time - backup_time).total_seconds()
        print(f"   RPO (恢复点目标): {rpo:.2f}s")
        
    except Exception as e:
        print(f"   ✗ 灾难恢复失败: {e}")
    
    # 4. Validate the recovery result (runs even if step 3 failed; the
    # validator will then report against a missing/partial target index).
    print("4. 验证恢复结果...")
    try:
        from es_archive.core.validator import DataIntegrityValidator
        
        validator = DataIntegrityValidator(es_client)
        validation_result = validator.validate_restore_integrity(
            backup_id, "critical_data_recovered"
        )
        
        if validation_result.valid:
            print("   ✓ 数据完整性验证通过")
        else:
            print("   ✗ 数据完整性验证失败")
            for error in validation_result.errors:
                print(f"     - {error}")
        
    except Exception as e:
        print(f"   验证失败: {e}")


def scenario_multi_index_backup():
    """Scenario: back up several indices in parallel.

    Fans out one full backup per index over a small thread pool,
    prints each index's result as it completes, and finishes with an
    aggregate success/failure summary.
    """
    print("\n=== 多索引批量备份场景 ===")

    es_client = setup_es_client()
    backup_manager = BackupManager(es_client, {})

    # Indices to back up.
    indices = ["logs-2023-01", "logs-2023-02", "logs-2023-03"]

    # Thread pool for parallel backups.
    import concurrent.futures

    def _run_backup(name):
        """Back up one index and describe the outcome as a plain dict."""
        try:
            strategy = BackupStrategy(
                strategy_type="full",
                compression="gzip",
                batch_size=2000
            )

            meta = backup_manager.backup_index(
                index=name,
                output_dir=f"./backups/multi/{name}",
                strategy=strategy
            )
        except Exception as exc:
            return {
                'index': name,
                'status': 'failed',
                'error': str(exc)
            }

        return {
            'index': name,
            'status': 'success',
            'backup_id': meta.backup_id,
            'documents': meta.backed_up_documents
        }

    # Run the backups concurrently, reporting in completion order.
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as pool:
        print("开始并行备份...")

        pending = {pool.submit(_run_backup, name): name for name in indices}

        for done in concurrent.futures.as_completed(pending):
            outcome = done.result()
            results.append(outcome)

            if outcome['status'] == 'success':
                print(f"  ✓ {outcome['index']}: {outcome['documents']:,} 文档")
            else:
                print(f"  ✗ {outcome['index']}: {outcome['error']}")

    # Aggregate summary.
    successful = [r for r in results if r['status'] == 'success']
    failed = [r for r in results if r['status'] == 'failed']

    print(f"\n批量备份完成:")
    print(f"  成功: {len(successful)} 个索引")
    print(f"  失败: {len(failed)} 个索引")

    total_documents = sum(r['documents'] for r in successful)
    print(f"  总文档数: {total_documents:,}")


def main():
    """Entry point: verify the ES connection, then run each scenario in turn."""
    print("ES Archive 高级场景示例")
    print("=" * 60)

    # Abort early if Elasticsearch is unreachable.
    try:
        es_client = setup_es_client()
        cluster_info = es_client.get_cluster_info()
        print(f"✓ ES连接成功: {cluster_info.get('cluster_name', 'unknown')}")
    except Exception as e:
        print(f"✗ ES连接失败: {e}")
        print("请确保Elasticsearch正在运行并且配置正确")
        return

    # Advanced scenarios, executed in order.
    scenarios = (
        scenario_large_dataset_backup,
        scenario_compression_optimization,
        scenario_automated_backup_with_alerts,
        scenario_disaster_recovery,
        scenario_multi_index_backup,
    )

    total = len(scenarios)
    for number, run_scenario in enumerate(scenarios, start=1):
        try:
            print(f"\n[{number}/{total}] 执行场景...")
            run_scenario()

        except KeyboardInterrupt:
            # Stop the whole demo on Ctrl-C.
            print("\n用户中断执行")
            break
        except Exception as e:
            # A failing scenario does not stop the remaining ones.
            print(f"\n✗ 场景执行失败: {e}")

    print("\n" + "=" * 60)
    print("✓ 高级场景演示完成")

if __name__ == "__main__":
    main()
