from typing import Dict, Any
from ..utils.cluster_manager import ClusterManager

def _extract_job_id(submit_result: Any):
    """Return the job ID embedded in a submission result, or None.

    Submission helpers report success as a string containing 'ID:<job-id>';
    any other result (error dict, None, ...) yields no ID.
    """
    if isinstance(submit_result, str) and "ID:" in submit_result:
        return submit_result.split("ID:")[-1].strip()
    return None


def _show_hdfs(cluster: "ClusterManager") -> None:
    """Print HDFS service status and the entries under /user/data."""
    # Check HDFS status
    print("\n1. 检查HDFS状态：")
    hdfs_status = cluster.check_hdfs_status()
    print(f"HDFS状态: {hdfs_status}")

    # List an HDFS directory; each entry is assumed to be a WebHDFS-style
    # FileStatus dict ('pathSuffix', 'length') — TODO confirm against ClusterManager
    print("\n2. 列出HDFS目录内容：")
    for entry in cluster.list_hdfs_directory("/user/data"):
        print(f"文件: {entry.get('pathSuffix')}, 大小: {entry.get('length')} 字节")


def _run_spark_demo(cluster: "ClusterManager") -> None:
    """Submit a sample Spark job and, if an ID is returned, report its status."""
    print("\n3. 提交Spark作业：")
    spark_job = cluster.submit_spark_job(
        "/path/to/spark_job.py",
        ["--input", "/user/data/input", "--output", "/user/data/output"]
    )
    print(f"Spark作业提交结果: {spark_job}")

    job_id = _extract_job_id(spark_job)
    if job_id is not None:
        status = cluster.get_spark_job_status(job_id)
        print(f"Spark作业状态: {status}")


def _run_flink_demo(cluster: "ClusterManager") -> None:
    """Submit a sample Flink job and, if an ID is returned, report its status."""
    print("\n4. 提交Flink作业：")
    flink_job = cluster.submit_flink_job(
        "/path/to/flink_job.jar",
        "com.example.FlinkJob",
        ["--input", "/user/data/input", "--output", "/user/data/output"]
    )
    print(f"Flink作业提交结果: {flink_job}")

    job_id = _extract_job_id(flink_job)
    if job_id is not None:
        status = cluster.get_flink_job_status(job_id)
        print(f"Flink作业状态: {status}")


def _show_yarn(cluster: "ClusterManager") -> None:
    """Print YARN cluster metrics and the currently running applications."""
    print("\n5. YARN集群指标：")
    yarn_metrics = cluster.get_yarn_metrics()
    print(f"YARN指标: {yarn_metrics}")

    # Apps are assumed to be YARN REST-style dicts ('id', 'name', 'state')
    # — TODO confirm against ClusterManager
    print("\n6. 正在运行的YARN应用：")
    for app in cluster.list_yarn_applications(status="RUNNING"):
        print(f"应用ID: {app.get('id')}, 名称: {app.get('name')}, 状态: {app.get('state')}")


def main():
    """Walk through a demo of the cluster manager: HDFS, Spark, Flink, YARN.

    Purely illustrative: submits jobs with placeholder paths and prints
    every result to stdout. Raises whatever ClusterManager raises on failure.
    """
    cluster = ClusterManager()

    _show_hdfs(cluster)
    _run_spark_demo(cluster)
    _run_flink_demo(cluster)
    _show_yarn(cluster)

# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()