import sys
import os
from . import update_config
# Absolute path of the project root (two directory levels above this file).
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add the project root to sys.path so the `src.*` imports below resolve
# regardless of the current working directory.
sys.path.append(project_root)

import logging

from src.performance_collector.metric_collector import MetricCollector
from src.performance_analyzer.performance_analyzer import PerformanceAnalyzer

from src.performance_collector.static_collector import collect_system_profile
from src.performance_optimizer.param_recommender import ParamRecommender

from src.utils.shell_execute import SshClient
from src.utils.config.config import config
from src.utils.common import display_metrics

from src.performance_optimizer.pressure_test import PressureTest

from src.performance_optimizer.param_optimizer import ParamOptimizer,PerformanceMetric
from src.utils.llm import get_llm_response

logging.basicConfig(
    level=logging.INFO,  # log level
    format="%(asctime)s - %(levelname)s - %(message)s",  # log record format
    datefmt="%Y-%m-%d %H:%M:%S",  # timestamp format
)

# Module-wide settings taken from the first configured server/feature entry.
# NOTE(review): assumes config["servers"] and config["feature"] are non-empty
# lists with these keys present — verify against the config schema.
max_retries = config["servers"][0]["max_retries"]
delay = config["servers"][0]["delay"]
need_restart_application = config["feature"][0]["need_restart_application"]
pressure_test_mode = config["feature"][0]["pressure_test_mode"]



def AnalyzerService(host_ip, host_port, host_user, host_password, app):
    """Collect runtime metrics from a remote host and analyze them.

    :param host_ip: target host address
    :param host_port: SSH port of the target host
    :param host_user: SSH user name
    :param host_password: SSH password
    :param app: application name that drives which collectors run
    :return: dict with the per-subsystem reports and the detected bottleneck
    """
    logging.info("[Main] collecting performance ...")
    metric_collector = MetricCollector(
        host_ip=host_ip,
        host_port=host_port,
        host_user=host_user,
        host_password=host_password,
        app=app,
    )
    data = metric_collector.run_all_collector()

    logging.info("[Main] analyzing performance ...")
    analyzer = PerformanceAnalyzer(data=data)
    cpu_report, disk_report, memory_report, network_report, app_report, bottleneck = analyzer.run_service()
    # Route the analyzer output through the module logger (instead of a bare
    # debug print) so it carries timestamps and respects the log level.
    logging.info(
        "PerformanceAnalyzer result: %s %s %s %s %s %s",
        cpu_report, disk_report, memory_report, network_report, app_report, bottleneck,
    )

    return {
        "cpu_report": cpu_report,
        "disk_report": disk_report,
        "memory_report": memory_report,
        "network_report": network_report,
        "app_report": app_report,
        "bottleneck": bottleneck,
    }

def get_performance_metric(app_name: str) -> PerformanceMetric:
    """Map an application name to the metric used to score its performance.

    The lookup is case-insensitive; any name not in the table falls back to
    the default PerformanceMetric.QPS.

    :param app_name: application name (e.g. "cpu", "mysql", "nginx")
    :return: the matching PerformanceMetric enum member
    """
    metric_by_app = {
        "cpu": PerformanceMetric.CPU_USAGE,
        "mysql": PerformanceMetric.QPS,
        "nginx": PerformanceMetric.RPS,
        "network": PerformanceMetric.NETWORK_TRAFFIC,
        "memory": PerformanceMetric.MEMORY_USAGE,
        "disk": PerformanceMetric.DISK_USAGE,
    }
    return metric_by_app.get(app_name.lower(), PerformanceMetric.QPS)


# One-click optimization entry point.
def OptimizerService(host_ip,host_port,host_user,host_password,app="cpu",appconfig=None,mock=True, original_result = None):
    """Run the parameter-optimization loop against a remote host.

    :param host_ip: target host address
    :param host_port: SSH port of the target host
    :param host_user: SSH user name
    :param host_password: SSH password
    :param app: service being tuned (e.g. "cpu", "mysql", "nginx")
    :param appconfig: application config pushed via update_config for non-"cpu" apps
    :param mock: when True, skip all remote work and return canned numbers
    :param original_result: prior recommendation passed through to the optimizer
    :return: dict with "baseline" and "bestresult" benchmark values
    """
    if mock:
        print("original_result:", original_result)
        return {
            "baseline": 1234,
            "bestresult": 4321
        }

    # Non-CPU apps carry their own application-level config that must be
    # synced before connecting.
    if app != "cpu":
        update_config.update_app_config(appconfig)

    ssh_client = SshClient(
        host_ip=host_ip,
        host_port=host_port,
        host_user=host_user,
        host_password=host_password,
        max_retries=max_retries,
        delay=delay,
    )

    if pressure_test_mode:
        # Pressure-test mode on: simulate a loaded environment while tuning.
        # Off: the optimizer's own benchmark run serves as the baseline.
        logging.info("[Main] start pressure test ...")
        pressure_test = PressureTest(app, ssh_client)
        pressure_test.start()

    def slo_calc_callback(baseline, benchmark_result):
        # Relative improvement over the baseline; guard against a missing or
        # (near-)zero baseline to avoid ZeroDivisionError.
        if baseline is None or abs(baseline) < 1e-9:
            return 0.0
        return (benchmark_result - baseline) / baseline

    param_optimizer = ParamOptimizer(
        service_name=app,
        performance_metric=get_performance_metric(app),
        slo_goal=0.1,
        ssh_client=ssh_client,
        slo_calc_callback=slo_calc_callback,
        max_iterations=1,
        need_restart_application=need_restart_application,
        pressure_test_mode=pressure_test_mode,
    )
    baseline, best_result = param_optimizer.run(original_result)
    return {
        "baseline": baseline,
        "bestresult": best_result,
    }


# One-click analysis entry point.
def RecommenderService(host_ip,host_port,host_user,host_password,app="cpu",appconfig=None,mock=True):
    """Collect metrics, analyze them, and recommend tuning parameters.

    :param host_ip: target host address
    :param host_port: SSH port of the target host
    :param host_user: SSH user name
    :param host_password: SSH password
    :param app: service being analyzed (e.g. "cpu", "mysql", "nginx")
    :param appconfig: application config pushed via update_config for non-"cpu" apps
    :param mock: when True, skip all remote work and return placeholder data
    :return: tuple of (summary dict with "report"/"bottleneck"/"param",
             raw parameter recommendation dict)
    """
    if mock:
        return {
            "report": {
                "cpu_report": "cpu_report_placeholder",
                "disk_report": "disk_report_placeholder",
                "memory_report": "memory_report_placeholder",
                "network_report": "network_report_placeholder",
                "app_report": "app_report_placeholder"
            },
            "bottleneck": "CPU",
            "param": {
                "cpu": ['cpu'],
                "disk": ['disk'],
                "memory": ['memory'],
                "network": ['network'],
                "app": ['app']
            }
        }, {}

    # Non-CPU apps carry their own application-level config that must be
    # synced before connecting.
    if app != "cpu":
        update_config.update_app_config(appconfig)

    ssh_client = SshClient(
        host_ip=host_ip,
        host_port=host_port,
        host_user=host_user,
        host_password=host_password,
        max_retries=max_retries,
        delay=delay,
    )

    logging.info("[Main] collecting performance ...")
    static_profile = collect_system_profile(
        host_ip=host_ip,
        host_port=host_port,
        host_user=host_user,
        host_password=host_password,
    )

    if pressure_test_mode:
        # Pressure-test mode on: simulate a loaded environment and collect
        # load data during the pressure run. Off: the normal benchmark flow
        # provides the baseline.
        logging.info("[Main] start pressure test ...")
        pressure_test = PressureTest(app, ssh_client)
        pressure_test.start()

    metric_collector = MetricCollector(
        host_ip=host_ip,
        host_port=host_port,
        host_user=host_user,
        host_password=host_password,
        app=app
    )
    data = metric_collector.run_all_collector()

    logging.info("[Main] analyzing performance ...")
    testAnalyzer = PerformanceAnalyzer(data=data)
    cpu_report, disk_report, memory_report, network_report, app_report, bottleneck = testAnalyzer.run_service()
    # Route the analyzer output through the module logger (instead of a bare
    # debug print) so it carries timestamps and respects the log level.
    logging.info(
        "PerformanceAnalyzer result: %s %s %s %s %s %s",
        cpu_report, disk_report, memory_report, network_report, app_report, bottleneck,
    )

    report = {
        "cpu_report": cpu_report,
        "disk_report": disk_report,
        "memory_report": memory_report,
        "network_report": network_report,
        "app_report": app_report,
        "bottleneck": bottleneck
    }

    param_recommender = ParamRecommender(
        service_name=app,
        slo_goal=0.1,
        performance_metric=get_performance_metric(app),
        static_profile=static_profile,
        performance_analysis_report=report,
        ssh_client=ssh_client,
    )
    original_result = param_recommender.run([])

    result_classification = classify_params(original_result)
    final_result = {
        "report": report,
        "bottleneck": bottleneck,
        "param": result_classification,
    }
    return final_result, original_result

# Category membership tables for classify_params. Hoisted to module level as
# frozensets so they are built once at import time instead of on every call.
_CPU_PARAMS = frozenset({
    "kernel.sched_migration_cost_ns", "kernel.sched_cfs_bandwidth_slice_us",
    "kernel.sched_wakeup_granularity_ns", "kernel.sched_latency_ns",
    "kernel.sched_nr_migrate", "kernel.sched_min_granularity_ns",
    "kernel.sched_tunable_scaling", "kernel.sched_rt_runtime_us",
    "kernel.timer_migration", "kernel.threads-max", "kernel.sched_autogroup_enabled",
    "kernel.numa_balancing", "kernel.nmi_watchdog", "kernel.sysrq"
})

_DISK_PARAMS = frozenset({
    "blockdev", "block.fifo_batch", "block.front_merges", "block.read_expire",
    "block.writes_starved", "block.max_sectors_kb", "block.queue_depth",
    "block.nr_requests", "block.read_ahead_kb", "block.rq_affinity", "block.add_random",
    "block.rotational", "block.scheduler", "block.write_cache", "block.nomerges",
    "blockdev_multidisk", "block.fifo_batch_multidisk", "block.front_merges_multidisk",
    "block.read_expire_multidisk", "block.writes_starved_multidisk",
    "block.max_sectors_kb_multidisk", "block.queue_depth_multidisk",
    "block.nr_requests_multidisk", "block.read_ahead_kb_multidisk",
    "block.rq_affinity_multidisk", "block.add_random_multidisk",
    "block.rotational_multidisk", "block.scheduler_multidisk",
    "block.write_cache_multidisk", "block.nomerges_multidisk"
})

_MEMORY_PARAMS = frozenset({
    "transparent_hugepage.defrag", "transparent_hugepage.enabled",
    "kernel.pid_max", "kernel.shmmni", "kernel.shmmax", "kernel.shmall",
    "kernel.core_uses_pid", "kernel.msgmni", "kernel.msgmax", "kernel.msgmnb",
    "kernel.sem", "kernel.hung_task_timeout_secs", "kernel.randomize_va_space",
    "vm.swappiness", "vm.vfs_cache_pressure", "vm.dirty_background_ratio",
    "vm.dirty_ratio", "vm.stat_interval", "vm.dirty_expire_centisecs",
    "vm.dirty_writeback_centisecs", "vm.overcommit_ratio", "vm.overcommit_memory",
    "vm.min_free_kbytes", "vm.page-cluster", "vm.max_map_count",
    "vm.zone_reclaim_mode", "vm.watermark_scale_factor", "vm.numa_stat",
    "vm.drop_caches", "fs.inotify.max_user_watches", "fs.nr_open", "fs.file-max",
    "fs.aio-max-nr", "fs.inotify.max_user_instances", "fs.suid_dumpable"
})

_NETWORK_PARAMS = frozenset({
    "net.netfilter.nf_conntrack_max", "net.mtu", "net.tx-frames", "net.rx-frames",
    "net.tx-usecs", "net.rx-usecs", "net.combined", "net.adaptive-rx",
    "net.adaptive-tx", "net.tx-ring buffer size", "net.rx-ring buffer size",
    "net.generic-receive-offload", "net.generic-segmentation-offload",
    "net.tcp-segmentation-offload", "net.core.netdev_budget", "net.core.optmem_max",
    "net.core.wmem_max", "net.core.wmem_default", "net.core.rmem_default",
    "net.core.rmem_max", "net.core.netdev_max_backlog",
    "net.ipv4.tcp_thin_linear_timeouts", "net.unix.max_dgram_qlen",
    "net.core.somaxconn", "net.core.busy_poll", "net.core.busy_read",
    "net.core.dev_weight", "net.ipv4.tcp_keepalive_intvl",
    "net.ipv4.tcp_keepalive_probes", "net.ipv4.tcp_keepalive_time",
    "net.ipv4.tcp_tw_reuse", "net.ipv4.tcp_window_scaling",
    "net.ipv4.tcp_fin_timeout", "net.ipv4.udp_mem", "net.ipv4.tcp_mem",
    "net.ipv4.tcp_rmem", "net.ipv4.tcp_wmem", "net.ipv4.tcp_fastopen",
    "net.ipv4.tcp_synack_retries", "net.ipv4.tcp_syn_retries",
    "net.ipv4.tcp_moderate_rcvbuf", "net.ipv4.tcp_timestamps",
    "net.ipv4.tcp_dsack", "net.ipv4.tcp_fack", "net.ipv4.tcp_sack",
    "net.ipv4.tcp_low_latency", "net.ipv4.tcp_adv_win_scale",
    "net.ipv4.route.max_size", "net.ipv4.tcp_max_tw_buckets",
    "net.ipv4.tcp_max_syn_backlog", "net.ipv4.tcp_max_orphans",
    "net.ipv4.tcp_ecn", "net.ipv4.ip_forward", "net.ipv4.conf.default.rp_filter",
    "net.ipv4.ip_local_port_range", "net.ipv4.tcp_no_metrics_save",
    "net.ipv4.ip_default_ttl", "net.ipv4.ip_no_pmtu_disc", "net.ipv4.tcp_retries2",
    "net.ipv4.tcp_orphan_retries", "net.ipv4.tcp_syncookies",
    "net.ipv4.tcp_reordering", "net.ipv4.tcp_retrans_collapse",
    "net.ipv4.tcp_congestion_control", "net.ipv4.conf.default.promote_secondaries",
    "net.ipv4.conf.all.promote_secondaries", "net.ipv4.conf.all.accept_redirects",
    "net.ipv4.conf.default.accept_redirects", "net.ipv4.conf.all.secure_redirects",
    "net.ipv4.conf.default.secure_redirects", "net.ipv4.icmp_echo_ignore_broadcasts",
    "net.nf_conntrack_max", "net.netfilter.nf_conntrack_tcp_timeout_established",
    "net.netfilter.nf_conntrack_tcp_timeout_close_wait",
    "net.netfilter.nf_conntrack_tcp_timeout_fin_wait",
    "net.netfilter.nf_conntrack_tcp_timeout_time_wait",
    "net.ipv4.conf.default.forwarding", "net.core.rps_sock_flow_entries",
    "net.ipv4.tcp_min_tso_segs"
})


def classify_params(param_dict):
    """Split a {name: value} parameter dict into tuning categories.

    Each parameter is rendered as "name=value" and placed into exactly one
    bucket: "cpu", "disk", "memory", or "network" when the name is in the
    corresponding table, otherwise "app" (application-specific parameter).

    :param param_dict: mapping of parameter name to its value
    :return: dict with keys "cpu"/"disk"/"memory"/"network"/"app",
             each a list of "name=value" strings
    """
    cpu, disk, memory, network, app = [], [], [], [], []

    for name, value in param_dict.items():
        entry = f"{name}={value}"
        if name in _CPU_PARAMS:
            cpu.append(entry)
        elif name in _DISK_PARAMS:
            disk.append(entry)
        elif name in _MEMORY_PARAMS:
            memory.append(entry)
        elif name in _NETWORK_PARAMS:
            network.append(entry)
        else:
            app.append(entry)

    return {
        "cpu": cpu,
        "disk": disk,
        "memory": memory,
        "network": network,
        "app": app
    }


def param_classification(param_result):
    """Ask the LLM to group parameters into CPU/I-O/Memory/Network/Application.

    :param param_result: mapping of parameter name to value to be classified
    :return: raw LLM response; per the prompt it is expected to be a JSON
             object keyed by category, but the response is not validated here
    """
    # Log at debug level instead of unconditionally printing to stdout.
    logging.debug("param_classification input: %s", param_result)
    prompt = """
# 任务：按类别分组参数
# 输入：{param_result}
# 要求：
1. 将输入的键值对按 5 个类别分类：
  CPU：与调度器、时间片、负载均衡、CPU 亲和相关的内核参数或应用参数
  I/O：与磁盘/网络队列、缓存、并发、I/O 容量相关的内核参数或应用参数
  Memory：与页缓存、缓冲区、脏页、交换、内存分配相关的参数的内核参数或应用参数
  Network：与 MTU、网卡队列、中断合并、TCP/UDP 栈相关的内核参数或应用参数
  Application：不针对上述四类瓶颈的参数，明显针对具体应用的参数
2. 分类逻辑：
- 依据参数名判断主要作用域，每个参数仅归一类。
3. 输出格式：
返回 JSON 对象，键为类别名，值为对应参数的 list，每个元素为键值对 dict。
无额外文字，若类别为空，返回空列表。
示例：
{{
    "CPU": [{{"sched_latency_ns": 4000000}}],
    "I/O": [{{"io_capacity": 2400}}],
    "Memory": [{{"buffer_pool_size": 314572800}}],
    "Network": [{{"mtu": 9000}}],
    "Application": [{{"innodb_adaptive_flushing": "ON"}}]
}}
"""
    # Doubled braces in the template survive .format() as literal JSON braces.
    prompt = prompt.format(param_result=param_result)
    response = get_llm_response(prompt)
    return response

# Ad-hoc test driver
if __name__ == "__main__":
    # Sample parameter dump used to exercise classify_params.
    # NOTE(review): this literal contains duplicate keys (Python keeps the last
    # occurrence) and several apparently garbled names such as
    # 'innodb_concurrency_ticketelay' — looks like a corrupted paste; confirm
    # against the intended fixture.
    param_str = {'kernel.sched_migration_cost_ns': 300000, 'kernel.sched_latency_ns': 4000000, 'kernel.sched_nr_migrate': 24, 'net.netfilter.nf_conntrack_max': 256000, 'net.mtu': '9000', 'net.tx-frames': 64, 'net.rx-frames': 64, 'net.tx-usecs': 50, 'net.rx-usecs': 50, 'net.combined': 2, 'net.adaptive-rx': 'on', 'net.adaptive-tx': 'on', 'net.tx-ring buffer size': 1024, 'net.rx-ring buffer size': 1024, 'innodb_adaptive_flushing': 'ON', 'innodb_adaptive_hash_index': 'ON', 'innodb_adaptive_hash_index_parts': 16, 'innodb_adaptive_max_sleep_delay': 500000, 'innodb_buffer_pool_chunk_size': 1048576, 'innodb_buffer_pool_size': 314572800, 'innodb_buffer_pool_dump_at_shutdown': 'ON', 'innodb_buffer_pool_dump_pct': 80, 'innodb_buffer_pool_instances': 2, 'innodb_change_buffer_max_size': 50, 'innodb_commit_concurrency': 8, 'innodb_checksum_algorithm': 'crc32', 'innodb_compression_level': 2, 'innodb_concurrency_ticketelay': 500000, 'innodb_buffer_pool_chunk_size': 1048576, 'innodb_buffer_pool_size': 314572800, 'innodb_buffer_pool_dump_at_shutdown': 'ON', 'innodb_buffer_pool_dump_pct': 80, 'innodb_buffer_pool_instances': 2, 'innodb_change_buffer_max_size': 50, 'innodb_commit_concurrency': 8, 'innodb_checksum_algorithm': 'crc32', 'innodb_compression_level': 2, 'innodb_concurrency_ticketer_pool_instances': 2, 'innodb_change_buffer_max_size': 50, 'innodb_commit_concurrency': 8, 'innodb_checksum_algorithm': 'crc32', 'innodb_compression_level': 2, 'innodb_concurrency_tickets': 100, 'innodb_ddl_threads': 4, 'innodb_default_row_format': 'DYNAMIC', 'innodb_disable_sort_file_cache': 'ON', 'innodb_doublewrite': 'ON', 'innodb_fast_shutdown': 1, 'innodb_file_per_table': 'ON', 'innodb_flush_log_at_trx_commit': 2, 'innodb_io_capacity': 2400, 'innodb_io_capacity_max': 10000, 'innodb_lock_wait_timeout': 30, 'innodb_log_buffer_size': 33554432, 'innodb_able': 'ON', 'innodb_flush_log_at_trx_commit': 2, 'innodb_io_capacity': 2400, 'innodb_io_capacity_max': 10000, 'innodb_lock_wait_timeout': 30, 
'innodb_log_buffer_size': 33554432, 'innodb_log_file_size': 134217728, 'innodb_max_dirty_pages_pct': 95, 'innodb_old_blocks_pct': 50, 'innodb_open_files': 500, 'innodb_parallel_read_threads': 8, 'innodb_read_ahead_threshold': 64, 'innodb_read_io_threads': 8, 'innodb_sync_array_size': 600, 'innodb_thread_concurrency': 4, 'innodb_write_io_threads': 8, 'thread_cache_size': 16, 'tmp_table_size': 33554432}
    print(classify_params(param_str))

    # NOTE(review): everything below is unreachable because of exit(1), and
    # `app` is not defined in this scope — it would raise NameError if reached.
    exit(1)
    print(get_performance_metric(app))  # expected: PerformanceMetric.CPU_USAGE
    print(type(get_performance_metric(app)))
    print(get_performance_metric("mysql").value)  # expected: PerformanceMetric.QPS
    print(get_performance_metric("nginx").value)  # expected: PerformanceMetric.THROUGHPUT
    print(get_performance_metric("network"))  # expected: PerformanceMetric.NETWORK_TRAFFIC
    print(get_performance_metric("unknown"))  # expected: PerformanceMetric.QPS