"""Implements evaluation of agents on a single transformer task."""

import asyncio
import json
import os
from typing import Any, Optional

import pandas as pd

from evaluation.utils.shared import (
    EvalMetadata,
    EvalOutput,
    codeact_user_response,
    compatibility_for_eval_history_pairs,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    get_parser,
    load_app_config,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.core.setup import create_controller, create_agent
from openhands.events import EventSource
from openhands.events.action import CmdRunAction, MessageAction
from openhands.events.observation import CmdOutputObservation
from openhands.runtime.base import Runtime
from openhands.utils.async_utils import call_async_from_sync
from workspace.TimeMixer.utils.metrics import metric

# Maps an agent class name to the fake-user-response function that keeps the
# conversation going during automated evaluation (no human in the loop).
AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
}

# App config loaded at import time.
# NOTE(review): every function below builds its own config via
# load_app_config()/get_config(), so this module-level global appears unused —
# confirm before removing (kept for any import-time side effect of loading).
config = load_app_config()


def get_config(
    metadata: EvalMetadata,
) -> AppConfig:
    """Assemble the AppConfig for one evaluation run.

    Loads the on-disk app config as a base, applies the agent class and
    iteration budget from *metadata*, installs the sandbox settings for the
    code-adaptation container, wires in the LLM config, and logs every
    resulting setting.
    """
    # Start from a freshly loaded application config as the base.
    cfg = load_app_config()
    cfg.default_agent = metadata.agent_class
    cfg.max_iterations = metadata.max_iterations
    cfg.runtime = 'eventstream'

    # Sandbox settings for the evaluation container.
    cfg.sandbox = SandboxConfig(
        enable_gpu=False,
        runtime_container_image='code_adaptation_agent',
        timeout=86400,
        base_container_image=None,
        enable_auto_lint=True,
        use_host_network=True,
        # runtime_extra_deps='echo "export PATH=/opt/conda/envs/timemix/bin:$PATH" >> ~/.bashrc ; echo "conda activate timemix" >> ~/.bashrc',
    )

    # Install the LLM configuration carried by the metadata.
    cfg.set_llm_config(metadata.llm_config)

    # Dump the effective configuration to the log, section by section.
    for line in (
        '完整配置信息:',
        '基础配置:',
        f'  - 默认代理: {cfg.default_agent}',
        f'  - 最大迭代次数: {cfg.max_iterations}',
        f'  - 运行时: {cfg.runtime}',
        f'  - 调试模式: {cfg.debug}',
        '文件系统配置:',
        f'  - 文件存储类型: {cfg.file_store}',
        f'  - 文件存储路径: {cfg.file_store_path}',
        f'  - 工作空间基础路径: {cfg.workspace_base}',
        f'  - 工作空间挂载路径: {cfg.workspace_mount_path}',
        f'  - 沙箱内工作空间路径: {cfg.workspace_mount_path_in_sandbox}',
        f'  - 缓存目录: {cfg.cache_dir}',
        '沙箱配置:',
        f'  - 容器镜像: {cfg.sandbox.runtime_container_image}',
        f'  - 超时时间: {cfg.sandbox.timeout}秒',
        f'  - 自动lint: {cfg.sandbox.enable_auto_lint}',
        f'  - 使用主机网络: {cfg.sandbox.use_host_network}',
        '安全配置:',
        f'  - 以openhands用户运行: {cfg.run_as_openhands}',
        f'  - 最大文件上传大小: {cfg.file_uploads_max_file_size_mb}MB',
        f'  - 限制文件类型: {cfg.file_uploads_restrict_file_types}',
        'LLM配置:',
        f'  - 当前LLM配置: {metadata.llm_config}',
    ):
        logger.info(line)
    return cfg


def _run_checked(
    runtime: Runtime, command: str, log_label: str = 'Command output'
) -> CmdOutputObservation:
    """Run *command* in the runtime, log the action/observation pair, and
    fail fast if the command exits non-zero.

    NOTE: mirrors the original `assert` behavior, so the check is stripped
    under `python -O`.
    """
    action = CmdRunAction(command=command)
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(f'{log_label}: {obs.content}', extra={'msg_type': 'OBSERVATION'})
    assert obs.exit_code == 0, f'command failed ({obs.exit_code}): {command}'
    return obs


def initialize_runtime(
    runtime: Runtime,
    instance: pd.Series,  # this argument is not required
):
    """Initialize the runtime for the agent.

    Verifies the sandbox user, puts the `timemix` conda env's interpreter on
    PATH, sanity-checks that PyTorch imports, clears stale call-graph output,
    and (re)installs the local `callgraph_tool` package. The *instance*
    argument is unused; it is kept for interface compatibility with the
    evaluation harness.
    """
    logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}")

    _run_checked(runtime, 'whoami', '当前用户')

    # Put the timemix conda env's interpreter first on PATH for future shells.
    _run_checked(
        runtime,
        'echo "export PATH=/opt/conda/envs/timemix/bin:$PATH" >> ~/.bashrc',
        '设置Python解释器路径',
    )
    _run_checked(runtime, 'source ~/.bashrc', '更新环境变量')
    _run_checked(runtime, 'which python', '设置环境变量后后 Python解释器路径')

    # Confirm PyTorch is importable with the env's interpreter.
    _run_checked(
        runtime,
        '/opt/conda/envs/timemix/bin/python -c "import torch"',
        '检查PyTorch导入',
    )

    _run_checked(runtime, 'cd /workspace')
    _run_checked(runtime, 'which python', 'conda后 Python解释器路径')

    # Remove stale call-graph output, then reinstall the local tool package.
    _run_checked(runtime, 'rm -rf /workspace/output/call_graph_temp.json')
    _run_checked(runtime, 'pip uninstall -y callgraph_tool')
    _run_checked(runtime, 'pip install -e /workspace/pkg/callgraph_tool')

    # Report the torch version and CUDA availability in the log.
    _run_checked(
        runtime,
        'python -c "import torch; print(\'PyTorch version:\', torch.__version__); print(\'CUDA available:\', torch.cuda.is_available())"',
    )

    logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}")


def complete_runtime(
    runtime: Runtime,
    instance: pd.Series,  # this argument is not required
) -> dict[str, Any]:
    """Complete the runtime for the agent.
    TODO: Implement evaluation logic later.
    """
    banner = '-' * 50
    logger.info(f'{banner} BEGIN Runtime Completion Fn {banner}')

    # Placeholder result until real evaluation logic lands (see TODO above).
    outputs: dict[str, Any] = {
        'eval_output': '',
        'success': 1,  # defaults to success for now
        'eval_exit_code': 0,
    }

    logger.info(f'{banner} END Runtime Completion Fn {banner}')
    return outputs


def process_instance(
        instance: pd.Series,
        metadata: EvalMetadata,
        history: str = '',
        reset_logger: bool = True,
):
    """Process a single instance.

    Builds the evaluation config, creates and connects a runtime, prepares it
    via initialize_runtime, reads the agent instruction from instructions.txt
    (appending *history* when given), runs the agent controller to completion,
    and packages the result.

    Returns:
        A 2-tuple of (EvalOutput, serialized history pairs).
    """
    config = get_config(metadata)
    # Setup logger
    if reset_logger:
        # Per-instance log file so parallel workers do not interleave output.
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir)
    else:
        logger.info(f'Starting evaluation for instance {instance["instance_id"]}.')

    runtime = create_runtime(config)
    # runtime.connect is a coroutine; bridge it from this synchronous context.
    call_async_from_sync(runtime.connect)
    initialize_runtime(runtime, instance)

    # Prepare instruction
    instruction_path = os.path.join(os.path.dirname(__file__), 'instructions.txt')
    with open(instruction_path, 'r') as file:
        instruction = file.read().strip()

    # Append any prior conversation history to the instruction text.
    if isinstance(history, str) and len(history) > 0:
        instruction = instruction + '\n' + history
    # Run the agent
    state: State | None = asyncio.run(
        run_controller(
            config=config,
            initial_user_action=MessageAction(content=instruction),
            runtime=runtime,
            fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
                metadata.agent_class
            ),
        )
    )
    assert state is not None
    # NOTE(review): metrics collection from state.metrics is deliberately
    # disabled below and an empty dict is recorded instead — confirm intent.
    # if state.metrics is not None:
    #     metrics = state.metrics.get()
    # else:
    metrics = {}

    test_result = complete_runtime(runtime, instance)

    # Save the output
    output = EvalOutput(
        instance_id=instance['instance_id'],
        instance=instance.to_dict(),
        instruction=instruction,
        metadata=metadata,
        history=compatibility_for_eval_history_pairs(state.history),
        test_result=test_result,
        metrics=metrics,
    )
    return output, compatibility_for_eval_history_pairs(state.history)


def start_task(
        instance_id: str = 'single_transformer_test',
        llm_config: Any = 'llm.gpt4o',
        agent_class: str = 'CodeActAgent',
        max_iterations: int = 30,
        eval_output_dir: str = 'evaluation_outputs',
        eval_note=None,
        next=None
):
    """Set up an agent session for a single task and return its controller.

    Unlike run_single_task, this wires the agent/controller up manually and
    only queues the instruction on the event stream — stepping the controller
    is left to the caller.

    Args:
        instance_id: Task instance id.
        llm_config: LLM configuration; must not be None.
        agent_class: Agent class name.
        max_iterations: Maximum number of agent iterations.
        eval_output_dir: Directory for evaluation outputs.
        eval_note: Optional note attached to the evaluation metadata.
        next: Pre-step callback registered on the controller.
            NOTE(review): shadows the builtin `next`; renaming would break
            keyword callers, so it is kept.

    Returns:
        The initialized controller with the instruction queued as the first
        user message.

    Raises:
        ValueError: If llm_config is None.
    """
    if llm_config is None:
        raise ValueError('llm_config must be provided')

    # Build a one-row DataFrame describing the synthetic test instance.
    test_instance = pd.DataFrame(
        [
            {
                'instance_id': instance_id,
                'task_type': 'code_adaptation',
                'description': 'Adapt and optimize transformer code',
            }
        ]
    )
    instance = test_instance.iloc[0];
    # Build the evaluation metadata.
    metadata = make_metadata(
        llm_config=llm_config,
        dataset_name='single-transformer',
        agent_class=agent_class,
        max_iterations=max_iterations,
        eval_note=eval_note,
        eval_output_dir=eval_output_dir,
    )
    config = get_config(metadata)
    # Setup logger
    log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
    reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir)

    runtime = create_runtime(config)
    # runtime.connect is a coroutine; bridge it from this synchronous context.
    call_async_from_sync(runtime.connect)
    initialize_runtime(runtime, instance)

    agent = create_agent(runtime, config)

    controller, initial_state = create_controller(agent, runtime, config)

    # Register the caller-supplied pre-step hook.
    controller.set_pre_step(next)

    instruction_path = os.path.join(os.path.dirname(__file__), 'instructions.txt')
    with open(instruction_path, 'r') as file:
        instruction = file.read().strip()

    # Queue the instruction as the first user message on the event stream.
    controller.event_stream.add_event(MessageAction(content=instruction), EventSource.USER)

    return controller


def run_single_task(
    instance_id: str = 'single_transformer_test',
    history: str = '',
    agent_class: str = 'CodeActAgent',
    llm_config: Any = 'llm.gpt4o',
    max_iterations: int = 50,
    eval_output_dir: str = 'evaluation_outputs',
    eval_note: Optional[str] = None,
) -> EvalOutput:
    """Run a single task without the parallel evaluation framework.

    Args:
        instance_id: Task instance id.
        history: Extra conversation history appended to the instruction.
        agent_class: Agent class name.
        llm_config: LLM configuration; must not be None.
        max_iterations: Maximum number of agent iterations.
        eval_output_dir: Directory for evaluation outputs.
        eval_note: Optional note attached to the evaluation metadata.

    Returns:
        EvalOutput: The evaluation output for the instance.

    Raises:
        ValueError: If llm_config is None.
    """
    if llm_config is None:
        raise ValueError('llm_config must be provided')

    # Build a one-row DataFrame describing the synthetic test instance.
    test_instance = pd.DataFrame(
        [
            {
                'instance_id': instance_id,
                'task_type': 'code_adaptation',
                'description': 'Adapt and optimize transformer code',
            }
        ]
    )

    # Build the evaluation metadata.
    metadata = make_metadata(
        llm_config=llm_config,
        dataset_name='single-transformer',
        agent_class=agent_class,
        max_iterations=max_iterations,
        eval_note=eval_note,
        eval_output_dir=eval_output_dir,
    )

    # BUG FIX: process_instance returns (EvalOutput, history_pairs). The old
    # code returned that tuple unchanged, contradicting the declared return
    # type and crashing callers that do result.model_dump() (see __main__).
    output, _history_pairs = process_instance(
        test_instance.iloc[0], metadata, history=history, reset_logger=True
    )
    return output


if __name__ == '__main__':
    # Extend the shared evaluation argument parser with a single-task flag.
    parser = get_parser()
    parser.add_argument(
        '--single_mode', action='store_true', help='是否以单任务模式运行'
    )
    args, _ = parser.parse_known_args()

    print('the llm_config is ', args.llm_config)

    # Resolve the named LLM config; fail loudly when it cannot be found.
    llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else None
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

    if args.single_mode:
        # Single-task mode: run one instance and dump its result as JSONL.
        result = run_single_task(
            instance_id='single_transformer_test',
            agent_class=args.agent_cls,
            llm_config=llm_config,
            max_iterations=args.max_iterations,
            eval_output_dir=args.eval_output_dir,
            eval_note=args.eval_note,
        )
        output_file = os.path.join(
            args.eval_output_dir,
            'single-transformer',
            args.agent_cls,
            'single_task_output.jsonl',
        )
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        with open(output_file, 'w') as f:
            f.write(json.dumps(result.model_dump()) + '\n')
        logger.info(f'Single task result saved to {output_file}')
    else:
        # Parallel evaluation mode over a one-row synthetic dataset.
        test_instance = pd.DataFrame(
            [
                {
                    'instance_id': 'single_transformer_test',
                    'task_type': 'code_adaptation',
                    'description': 'Adapt and optimize transformer code',
                }
            ]
        )

        metadata = make_metadata(
            llm_config=llm_config,
            dataset_name='single-transformer',
            agent_class=args.agent_cls,
            max_iterations=args.max_iterations,
            eval_note=args.eval_note,
            eval_output_dir=args.eval_output_dir,
        )
        output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
        instances = prepare_dataset(test_instance, output_file, args.eval_n_limit)

        run_evaluation(
            instances, metadata, output_file, args.eval_num_workers, process_instance
        )