from typing import Dict, Any
from datetime import datetime, timedelta
from ..utils.cluster_manager import ClusterManager
from ..utils.monitoring import ClusterMonitor

# Alert when YARN occupied-memory percentage exceeds this value.
_YARN_MEMORY_ALERT_PERCENT = 80
# Polling interval (seconds) and overall timeout (seconds) for job progress.
_JOB_POLL_INTERVAL_SECONDS = 30
_JOB_TIMEOUT_SECONDS = 3600  # 1 hour


def _submit_log_and_monitor(
    monitor: ClusterMonitor,
    submit_result: Any,
    operation: str,
    status_fn: Any,  # callable(job_id) -> status; provided by ClusterManager
    progress_message: str,
) -> None:
    """Log a submitted job and poll its progress until completion or timeout.

    ``submit_result`` is expected to be a string containing ``"ID:<job id>"``
    (that is what ClusterManager's submit methods appear to return — TODO
    confirm). Any other value is treated as a failed submission and ignored,
    preserving the original best-effort behavior.
    """
    # Guard clause: nothing to monitor if no job ID came back.
    if not (isinstance(submit_result, str) and "ID:" in submit_result):
        return

    job_id = submit_result.split("ID:")[-1].strip()
    monitor.log_operation(
        operation,
        {'job_id': job_id, 'status': 'submitted'}
    )

    print(progress_message)
    monitor.monitor_job_progress(
        job_id,
        status_fn,
        interval=_JOB_POLL_INTERVAL_SECONDS,
        timeout=_JOB_TIMEOUT_SECONDS
    )


def demo_monitoring():
    """Demonstrate cluster monitoring end to end.

    Walks through six steps: HDFS status, YARN resource metrics (with a
    high-memory alert), Spark job submission + progress monitoring, Flink job
    submission + progress monitoring, a one-hour HDFS metrics history query,
    and a status log of all currently running YARN applications.
    """
    # Initialize the cluster manager and the monitor.
    cluster = ClusterManager()
    monitor = ClusterMonitor()

    # 1. Monitor HDFS status.
    print("\n1. 监控HDFS状态")
    hdfs_status = cluster.check_hdfs_status()
    monitor.record_metrics('hdfs', hdfs_status)
    monitor.log_operation(
        'hdfs_status_check',
        {'status': hdfs_status}
    )

    # 2. Monitor YARN resource usage.
    print("\n2. 监控YARN资源")
    yarn_metrics = cluster.get_yarn_metrics()
    monitor.record_metrics('yarn', yarn_metrics)

    # Alert when memory utilization is too high (missing key counts as 0%).
    if yarn_metrics.get('occupiedMemoryPercent', 0) > _YARN_MEMORY_ALERT_PERCENT:
        monitor.alert(
            'yarn',
            'YARN内存使用率超过80%',
            'warning'
        )

    # 3. Submit and monitor a Spark job.
    print("\n3. 提交并监控Spark作业")
    spark_job = cluster.submit_spark_job(
        "/path/to/spark_job.py",
        ["--input", "/user/data/input"]
    )
    _submit_log_and_monitor(
        monitor,
        spark_job,
        'spark_job_submit',
        cluster.get_spark_job_status,
        "开始监控Spark作业进度..."
    )

    # 4. Submit and monitor a Flink job (same flow as Spark, different API).
    print("\n4. 提交并监控Flink作业")
    flink_job = cluster.submit_flink_job(
        "/path/to/flink_job.jar",
        "com.example.FlinkJob",
        ["--input", "/user/data/input"]
    )
    _submit_log_and_monitor(
        monitor,
        flink_job,
        'flink_job_submit',
        cluster.get_flink_job_status,
        "开始监控Flink作业进度..."
    )

    # 5. Query monitoring history: HDFS metrics from the past hour.
    print("\n5. 查看监控历史")
    end_time = datetime.now()
    start_time = end_time - timedelta(hours=1)

    hdfs_history = monitor.get_metrics_history(
        'hdfs',
        start_time=start_time,
        end_time=end_time
    )
    print(f"过去1小时HDFS指标记录数: {len(hdfs_history)}")

    # 6. Log the status of every currently running YARN application.
    print("\n6. 监控YARN应用状态")
    running_apps = cluster.list_yarn_applications(status="RUNNING")
    for app in running_apps:
        monitor.log_operation(
            'yarn_app_status',
            {
                'app_id': app.get('id'),
                'name': app.get('name'),
                'state': app.get('state')
            }
        )

# Script entry point: run the monitoring demo when executed directly.
if __name__ == "__main__":
    demo_monitoring()