"""
Pipeline框架主入口 - 机器学习专用版本
"""
import asyncio
import argparse
import sys
import os
from datetime import datetime

# 添加当前目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from core.dag import DAG
from core.state_manager import StateManager
from core.plugin_manager import PluginManager
from core.executor import PipelineExecutor
from core.logger import get_logger

# 获取日志记录器
logger = get_logger(__name__)


def load_pipeline_config(config_path: str) -> dict:
    """Read and parse a pipeline configuration from a YAML file.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        The parsed configuration as a dictionary.
    """
    import yaml  # local import: keep the PyYAML dependency off module load
    with open(config_path, mode='r', encoding='utf-8') as fh:
        return yaml.safe_load(fh)


def build_dag_from_config(config: dict) -> DAG:
    """Construct and validate a DAG from a parsed pipeline configuration.

    Args:
        config: Parsed pipeline configuration. Recognized keys:
            "global_config" (dict, optional), "nodes" (mapping of
            node_id -> node spec) and "edges" (list of
            {"from": ..., "to": ...} mappings).

    Returns:
        A validated DAG instance.

    Raises:
        ValueError: If a node is missing its "plugin_type", an edge
            references an undefined node, or the graph contains a cycle.
    """
    dag = DAG()

    # Global settings apply to every node in the pipeline.
    dag.global_config = config.get("global_config", {})

    # Add nodes, failing fast with a clear message on malformed specs
    # (a bare KeyError here would not say which node is broken).
    nodes = config.get("nodes", {})
    for node_id, node_config in nodes.items():
        if "plugin_type" not in node_config:
            raise ValueError(f"Node '{node_id}' is missing required key 'plugin_type'")
        # Bundle per-node settings and resource limits into one config dict.
        full_config = {
            "config": node_config.get("config", {}),
            "resources": node_config.get("resources", {}),
        }
        dag.add_node(
            node_id=node_id,
            plugin_type=node_config["plugin_type"],
            config=full_config
        )

    # Wire up dependencies, rejecting edges that point at undefined nodes
    # so the error surfaces at build time, next to the bad config entry.
    for edge in config.get("edges", []):
        src, dst = edge["from"], edge["to"]
        if src not in nodes or dst not in nodes:
            raise ValueError(f"Edge {src!r} -> {dst!r} references an undefined node")
        dag.add_edge(src, dst)

    # Reject cyclic graphs: a pipeline must be a true DAG.
    if not dag.validate():
        raise ValueError("Invalid DAG configuration: contains cycles")

    return dag


async def main():
    """Parse CLI arguments, build the DAG, run the pipeline and report results.

    Returns:
        True if the pipeline executed successfully, False otherwise.
        (Previously always returned None; the return value is a
        backward-compatible addition so programmatic callers can
        detect failure.)
    """
    parser = argparse.ArgumentParser(description="ML Pipeline Framework")
    parser.add_argument("--config", default="config/data_pipeline_4.yaml",
                        help="Pipeline configuration file")
    parser.add_argument("--pipeline-id",
                        default="data_pipeline_demo_" + datetime.now().strftime("%Y%m%d_%H%M%S"),
                        help="Pipeline ID")
    parser.add_argument("--timeout", type=float, default=30.0,
                        help="Pipeline execution timeout in seconds (default: 30.0)")
    parser.add_argument("--max-concurrent", type=int, default=3,
                        help="Maximum concurrent tasks (default: 3)")
    args = parser.parse_args()

    logger.info(f"Loading pipeline configuration from: {args.config}")
    config = load_pipeline_config(args.config)

    logger.info("Building DAG from configuration...")
    dag = build_dag_from_config(config)

    logger.info("Initializing pipeline components...")
    plugin_manager = PluginManager(["src/plugins"])
    state_manager = StateManager(args.pipeline_id)
    executor = PipelineExecutor(
        dag=dag,
        state_manager=state_manager,
        plugin_manager=plugin_manager,
        max_concurrent_tasks=args.max_concurrent,
        pipeline_timeout=args.timeout
    )

    logger.info(f"Starting ML pipeline execution (timeout: {args.timeout}s, max concurrent: {args.max_concurrent})...")
    success = await executor.execute()
    logger.info(f"ML Pipeline execution {'SUCCEEDED' if success else 'FAILED (TIMEOUT or ERROR)'}")

    _log_pipeline_status(state_manager)
    _log_resource_usage(executor)

    if success:
        _log_final_result(state_manager)
    else:
        logger.error("Pipeline execution failed. Check logs for details.")

    logger.info(f"Output files are saved in: data/outputs/{args.pipeline_id}/")
    return success


def _log_pipeline_status(state_manager) -> None:
    """Log overall pipeline status plus per-node execution details."""
    pipeline_status = state_manager.get_pipeline_status()
    logger.info(f"Pipeline ID: {pipeline_status['pipeline_id']}")
    logger.info(f"Status: {pipeline_status['status']}")
    logger.info(f"Start time: {pipeline_status['start_time']}")
    logger.info(f"End time: {pipeline_status['end_time']}")

    logger.info("Node execution details:")
    for node_id, node_status in pipeline_status["nodes"].items():
        status = node_status['status']
        # NOTE(review): this only flags completion, not elapsed time — the
        # types of start_time/end_time are not visible here; confirm their
        # format before computing a real duration.
        duration = "completed" if node_status['start_time'] and node_status['end_time'] else "N/A"
        logger.info(f"{node_id:20} | {status:10} | {duration}")


def _log_resource_usage(executor) -> None:
    """Log the per-node resource usage report, if any usage was recorded."""
    resource_usage = executor.resource_manager.get_resource_usage_report()
    if not resource_usage:
        return
    logger.info("Resource usage report:")
    for node_id, usage in resource_usage.items():
        status = usage.get('status', 'unknown')
        resources = usage.get('resources', {})
        logger.info(f"{node_id:20} | Status: {status:10} | "
                    f"CPU: {resources.get('cpu_limit', 'N/A'):>5} | "
                    f"Memory: {resources.get('memory_limit', 'N/A'):>6} | "
                    f"Threads: {resources.get('thread_limit', 'N/A'):>3}")


def _log_final_result(state_manager) -> None:
    """Log the model-registry result of a successful pipeline run."""
    logger.info("Final ML Pipeline Result:")
    final_result = state_manager.get_node_result("model_registry")
    if not final_result:
        logger.info("No final result found")
        return
    registry_response = final_result.get("registry_response", {})
    logger.info(f"Model Registered: {registry_response.get('model_registered', False)}")
    logger.info(f"Registry Type: {registry_response.get('registry_type', 'unknown')}")
    accuracy = registry_response.get('model_performance', {}).get('accuracy', 'N/A')
    # Accuracy may be absent ('N/A') or a float; only format floats.
    if isinstance(accuracy, float):
        logger.info(f"Test Accuracy: {accuracy:.3f}")
    else:
        logger.info(f"Test Accuracy: {accuracy}")
    logger.info(f"Reason: {registry_response.get('registration_reason', 'N/A')}")


if __name__ == "__main__":
    # Script entry point: run the async pipeline driver to completion.
    asyncio.run(main())