/******************************************************************************
 * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 * libkperf licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 * Author:
 * Create: 2025-01-13
 * Description: Thread binding manager implementation
 ******************************************************************************/

#include "../include/thread_binding.h"
#include "../include/utils.h"
#include "../../include/pmu.h"
#include "../../include/pcerrc.h"
#include <unistd.h>
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <unordered_set>

namespace ThreadBinding {

// Weighted activity score combining the LLC miss ratio, the absolute LLC miss
// count, and the overall cache-access volume. Each component is capped at 1.0
// before weighting, so the result lies in [0, 1].
double ThreadMetrics::calculateActivityScore() const {
    // Normalization ceilings: 1M misses / 10M accesses count as "fully active".
    constexpr double kMissCountFullScale = 1000000.0;
    constexpr double kCacheAccessFullScale = 10000000.0;

    const double ratioComponent = std::min(miss_ratio, 1.0);
    const double missComponent =
        std::min(static_cast<double>(llc_miss) / kMissCountFullScale, 1.0);
    double accessComponent =
        std::min(static_cast<double>(llc_cache) / kCacheAccessFullScale, 1.0);

    // Very few cache accesses suggest the thread is mostly idle; halve that component.
    if (llc_cache < 1000) {
        accessComponent *= 0.5;
    }

    // Weights: 40% miss ratio, 40% miss volume, 20% access volume.
    return 0.4 * ratioComponent + 0.4 * missComponent + 0.2 * accessComponent;
}

// ThreadBindingManager implementation

// Construct the manager with the given configuration. No worker threads are
// started and no PMU resources are acquired here; see initialize()/start().
ThreadBindingManager::ThreadBindingManager(const Config& config)
    : config_(config), running_(false), should_stop_(false), pmu_fd_(-1) {

    // Zero all counters and stamp the start time so statistics queries are
    // meaningful even before the first collection cycle runs.
    stats_.total_bindings = 0;
    stats_.successful_bindings = 0;
    stats_.failed_bindings = 0;
    stats_.collection_cycles = 0;
    stats_.start_time = std::chrono::steady_clock::now();
    stats_.last_update = stats_.start_time;
}

// Stop the worker threads (stop() is a no-op when not running) and release
// the PMU descriptor if one is still open.
ThreadBindingManager::~ThreadBindingManager() {
    stop();
    if (pmu_fd_ != -1) {
        PmuClose(pmu_fd_);
        pmu_fd_ = -1;
    }
}

/**
 * One-time setup: discover the CPU/NUMA topology and prepare the PMU
 * attribute structure. Must be called before start().
 *
 * @return true on success, false when system topology could not be read.
 */
bool ThreadBindingManager::initialize() {
    Utils::log_message("Initializing Thread Binding Manager...", config_.verbose);

    // Initialize system information
    if (!initialize_system_info()) {
        Utils::log_error("Failed to initialize system information");
        return false;
    }

    // Initialize PMU attributes. Fields are assigned one by one (instead of
    // using memset) so that no pointer member is accidentally wiped.
    pmu_attr_ = std::make_unique<PmuAttr>();

    pmu_attr_->evtList = nullptr;  // filled in by collect_thread_metrics()
    pmu_attr_->numEvt = 0;
    pmu_attr_->pidList = nullptr;  // filled in by collect_thread_metrics()
    pmu_attr_->numPid = 0;
    pmu_attr_->cpuList = nullptr;  // filled in by collect_thread_metrics()
    pmu_attr_->numCpu = 0;
    pmu_attr_->evtAttr = nullptr;
    pmu_attr_->period = 0;
    pmu_attr_->useFreq = 0;
    pmu_attr_->excludeUser = 0;
    pmu_attr_->excludeKernel = 0;
    pmu_attr_->symbolMode = NO_SYMBOL_RESOLVE;  // counting mode needs no symbols
    pmu_attr_->callStack = 0;
    pmu_attr_->blockedSample = 0;
    pmu_attr_->dataFilter = SPE_FILTER_NONE;
    pmu_attr_->evFilter = SPE_EVENT_NONE;
    pmu_attr_->minLatency = 0;
    pmu_attr_->includeNewFork = 0;
    pmu_attr_->branchSampleFilter = KPERF_NO_BRANCH_SAMPLE;

    Utils::log_message("Thread Binding Manager initialized successfully", config_.verbose);
    return true;
}

/**
 * Start the collection worker thread and, when continuous binding is enabled,
 * the binding worker thread. Calling start() while already running only logs
 * a warning and returns.
 */
void ThreadBindingManager::start() {
    if (running_.load()) {
        Utils::log_warning("Thread Binding Manager is already running");
        return;
    }

    Utils::log_message("Starting Thread Binding Manager...", config_.verbose);

    // Flip flags before spawning so the workers see a consistent state.
    running_.store(true);
    should_stop_.store(false);

    // Start worker threads
    collection_thread_ = std::thread(&ThreadBindingManager::collection_worker_thread, this);

    if (config_.enable_continuous_binding) {
        binding_thread_ = std::thread(&ThreadBindingManager::binding_worker_thread, this);
    }

    Utils::log_message("Thread Binding Manager started successfully", config_.verbose);
}

/**
 * Signal both worker threads to stop, wake any waiters on cv_, and join the
 * threads. Safe to call when the manager is not running (returns immediately).
 */
void ThreadBindingManager::stop() {
    if (!running_.load()) {
        return;
    }

    Utils::log_message("Stopping Thread Binding Manager...", config_.verbose);

    // Request shutdown first, then wake workers blocked on the condition variable.
    should_stop_.store(true);
    cv_.notify_all();

    if (collection_thread_.joinable()) {
        collection_thread_.join();
    }

    if (binding_thread_.joinable()) {
        binding_thread_.join();
    }

    // Only mark as stopped once both workers have fully exited.
    running_.store(false);
    Utils::log_message("Thread Binding Manager stopped", config_.verbose);
}

// Replace the active configuration under data_mutex_.
// NOTE(review): worker threads read config_ fields (e.g. verbose) without
// holding data_mutex_ — assumed benign for flag-like settings; confirm.
void ThreadBindingManager::update_config(const Config& config) {
    std::lock_guard<std::mutex> lock(data_mutex_);
    config_ = config;
    Utils::log_message("Configuration updated", config_.verbose);
}

// Return a snapshot copy of the statistics, taken under stats_mutex_ so the
// caller never observes a partially-updated struct.
ThreadBindingManager::Statistics ThreadBindingManager::get_statistics() const {
    std::lock_guard<std::mutex> lock(stats_mutex_);
    return stats_;
}

bool ThreadBindingManager::initialize_system_info() {
    Utils::SystemInfo sys_info = Utils::get_system_info();

    if (sys_info.total_cpus == 0) {
        Utils::log_error("Failed to get system CPU information");
        return false;
    }

    // Initialize CPU cores
    cpu_cores_.clear();
    for (int i = 0; i < sys_info.total_cpus; ++i) {
        int numa_id = sys_info.cpu_to_numa_map[i];
        cpu_cores_.emplace_back(i, numa_id);
    }

    // Initialize NUMA nodes
    numa_nodes_.clear();
    for (const auto& numa_pair : sys_info.numa_to_cpus_map) {
        NumaNode numa_node(numa_pair.first);
        numa_node.cpu_cores = numa_pair.second;
        numa_node.available_cores = numa_pair.second; // Initially all cores are available
        numa_nodes_.push_back(numa_node);
    }

    Utils::log_message("System info initialized: " + std::to_string(sys_info.total_cpus) +
                      " CPUs, " + std::to_string(sys_info.numa_nodes) + " NUMA nodes", config_.verbose);

    return true;
}

/**
 * Run one collection cycle:
 *   1. enumerate threads matching config_.process_keyword,
 *   2. program the PMU with LLC-miss (r33) and LLC-access (r32) raw counters
 *      for the matched processes/CPUs,
 *   3. count for config_.min_collection_time_ms and read back the samples,
 *   4. aggregate per-thread counters, compute activity scores, and publish
 *      the result into current_threads_ under data_mutex_.
 *
 * @return true when metrics were collected and published, false otherwise.
 */
bool ThreadBindingManager::collect_thread_metrics() {
    // Find threads by keyword
    std::vector<ThreadMetrics> new_threads = find_threads_by_keyword(config_.process_keyword);

    if (new_threads.empty()) {
        Utils::log_warning("No threads found matching keyword: " + config_.process_keyword);
        return false;
    }

    // Group threads by owning process and gather the union of CPUs the
    // matched threads are allowed to run on.
    std::unordered_map<int, std::vector<ThreadMetrics*>> pid_to_threads;
    std::vector<int> process_pids;
    std::vector<int> all_cpus;
    std::unordered_set<int> unique_cpus;

    for (auto& thread : new_threads) {
        // Group by process ID.
        pid_to_threads[thread.pid].push_back(&thread);

        // Record the thread's CPU affinity and NUMA placement.
        Utils::CpuAffinityInfo affinity_info = Utils::get_thread_cpu_affinity(thread.tid);
        if (affinity_info.is_valid) {
            thread.cpu_affinity = affinity_info.allowed_cpus;
            thread.numa_id = affinity_info.numa_id;

            // Deduplicate CPUs while preserving first-seen order.
            for (int cpu : affinity_info.allowed_cpus) {
                if (unique_cpus.insert(cpu).second) {
                    all_cpus.push_back(cpu);
                }
            }
        }
    }

    // Collect all distinct process PIDs.
    for (const auto& pid_pair : pid_to_threads) {
        process_pids.push_back(pid_pair.first);
    }

    if (process_pids.empty() || all_cpus.empty()) {
        Utils::log_warning("No valid PIDs or CPUs found for collection");
        return false;
    }

    Utils::log_message("Collecting PMU data for " + std::to_string(process_pids.size()) +
                      " processes (" + std::to_string(new_threads.size()) + " threads) on " +
                      std::to_string(all_cpus.size()) + " CPUs", config_.verbose);

    // Raw events: r33 = LLC miss, r32 = LLC cache access. evtList is static
    // because pmu_attr_ stores the pointer beyond this scope.
    static char* evtList[2];
    evtList[0] = const_cast<char*>("r33"); // LLC miss
    evtList[1] = const_cast<char*>("r32"); // LLC cache access

    // Process-level counting configuration. pidList/cpuList point into local
    // vectors that stay alive through PmuOpen below; assumed PmuOpen does not
    // retain these pointers afterwards — TODO confirm against libkperf docs.
    pmu_attr_->evtList = evtList;
    pmu_attr_->numEvt = 2;
    pmu_attr_->pidList = process_pids.data();  // process PIDs, not thread TIDs
    pmu_attr_->numPid = process_pids.size();
    pmu_attr_->cpuList = all_cpus.data();
    pmu_attr_->numCpu = all_cpus.size();

    // Re-open the PMU for this cycle.
    if (pmu_fd_ != -1) {
        PmuClose(pmu_fd_);
    }

    pmu_fd_ = PmuOpen(COUNTING, pmu_attr_.get());
    if (pmu_fd_ == -1) {
        Utils::log_error("Failed to open PMU: " + std::string(Perror()));
        return false;
    }

    // Enable PMU collection
    if (PmuEnable(pmu_fd_) != 0) {
        Utils::log_error("Failed to enable PMU: " + std::string(Perror()));
        PmuClose(pmu_fd_);
        pmu_fd_ = -1;
        return false;
    }

    // Let the counters run for the configured minimum window.
    Utils::sleep_ms(config_.min_collection_time_ms);

    // Read PMU data
    PmuData* pmu_data = nullptr;
    int data_len = PmuRead(pmu_fd_, &pmu_data);

    if (data_len == -1) {
        Utils::log_error("Failed to read PMU data: " + std::string(Perror()));
        PmuDisable(pmu_fd_);
        PmuClose(pmu_fd_);
        pmu_fd_ = -1;
        return false;
    }

    if (data_len == 0) {
        Utils::log_warning("No PMU data collected");
        PmuDataFree(pmu_data);
        PmuDisable(pmu_fd_);
        return false;
    }

    Utils::log_message("Collected " + std::to_string(data_len) + " PMU data points", config_.verbose);

    // Aggregate samples per thread: tid -> (llc_miss, llc_cache).
    // 64-bit accumulators so summed hardware counts cannot wrap.
    std::unordered_map<unsigned, std::pair<uint64_t, uint64_t>> tid_data;
    std::unordered_map<unsigned, int> tid_to_pid;          // tid -> owning process
    std::unordered_map<unsigned, std::string> tid_to_comm; // tid -> process name

    for (int i = 0; i < data_len; ++i) {
        const PmuData& data = pmu_data[i];
        if (!data.evt) continue;  // skip samples without an event name

        unsigned tid = data.tid;
        int pid = data.pid;

        // Remember which process this thread belongs to.
        tid_to_pid[tid] = pid;

        // Remember the thread's process name when the sample carries one.
        if (data.comm != nullptr) {
            tid_to_comm[tid] = std::string(data.comm);
        }

        if (strcmp(data.evt, "r33") == 0) {
            tid_data[tid].first += data.count;  // LLC miss
        } else if (strcmp(data.evt, "r32") == 0) {
            tid_data[tid].second += data.count; // LLC cache
        }
    }

    Utils::log_message("Processed PMU data for " + std::to_string(tid_data.size()) + " threads", config_.verbose);

    // Fold aggregated counters back into the thread metric records.
    for (auto& thread : new_threads) {
        auto it = tid_data.find(thread.tid);
        if (it != tid_data.end()) {
            thread.llc_miss = it->second.first;
            thread.llc_cache = it->second.second;
            thread.miss_ratio = (thread.llc_cache != 0) ?
                static_cast<double>(thread.llc_miss) / thread.llc_cache : 0.0;
            thread.activity_score = thread.calculateActivityScore();
            thread.last_update = std::chrono::steady_clock::now();

            // Prefer the process name reported by the PMU data, when present.
            auto comm_it = tid_to_comm.find(thread.tid);
            if (comm_it != tid_to_comm.end() && !comm_it->second.empty()) {
                thread.comm = comm_it->second;
            }

            // Verbose per-thread LLC debug table.
            if (config_.verbose) {
                // NOTE(review): the header is printed once per process
                // lifetime (function-local static), not once per cycle.
                static bool header_printed = false;
                if (!header_printed) {
                    std::cout << "\n" << std::string(120, '=') << std::endl;
                    std::cout << "THREAD LLC METRICS TABLE" << std::endl;
                    std::cout << std::string(120, '=') << std::endl;
                    std::cout << std::setw(8) << "TID"
                             << std::setw(8) << "PID"
                             << std::setw(16) << "Process"
                             << std::setw(12) << "LLC Miss"
                             << std::setw(12) << "LLC Cache"
                             << std::setw(10) << "Miss %"
                             << std::setw(8) << "NUMA"
                             << std::setw(12) << "Activity"
                             << std::setw(15) << "CPU Affinity" << std::endl;
                    std::cout << std::string(120, '-') << std::endl;
                    header_printed = true;
                }

                // One data row per thread; truncate long process names.
                std::string process_name = thread.comm.empty() ? "unknown" : thread.comm;
                if (process_name.length() > 14) {
                    process_name = process_name.substr(0, 11) + "...";
                }

                std::cout << std::setw(8) << thread.tid
                         << std::setw(8) << thread.pid
                         << std::setw(16) << std::left << process_name << std::right
                         << std::setw(12) << thread.llc_miss
                         << std::setw(12) << thread.llc_cache
                         << std::setw(10) << std::fixed << std::setprecision(2)
                         << (thread.miss_ratio * 100.0) << "%"
                         << std::setw(8) << thread.numa_id
                         << std::setw(12) << std::fixed << std::setprecision(4)
                         << thread.activity_score;

                // CPU affinity column (comma-separated, truncated when long).
                if (thread.cpu_affinity.empty()) {
                    std::cout << std::setw(15) << "None";
                } else {
                    std::string affinity_str;
                    for (size_t i = 0; i < thread.cpu_affinity.size(); ++i) {
                        if (i > 0) affinity_str += ",";
                        affinity_str += std::to_string(thread.cpu_affinity[i]);
                    }
                    if (affinity_str.length() > 13) {
                        affinity_str = affinity_str.substr(0, 10) + "...";
                    }
                    std::cout << std::setw(15) << affinity_str;
                }
                std::cout << std::endl;
            }
        } else {
            // Threads with no collected samples still get a (verbose) row.
            if (config_.verbose) {
                std::string process_name = thread.comm.empty() ? "unknown" : thread.comm;
                if (process_name.length() > 14) {
                    process_name = process_name.substr(0, 11) + "...";
                }

                std::cout << std::setw(8) << thread.tid
                         << std::setw(8) << thread.pid
                         << std::setw(16) << std::left << process_name << std::right
                         << std::setw(12) << "N/A"
                         << std::setw(12) << "N/A"
                         << std::setw(10) << "N/A"
                         << std::setw(8) << thread.numa_id
                         << std::setw(12) << "N/A"
                         << std::setw(15) << "N/A" << std::endl;
            }
        }
    }

    // Publish the new snapshot. Capture the count while the lock is held so
    // later log statements do not race with the binding worker, which also
    // accesses current_threads_ under data_mutex_.
    size_t collected_count = 0;
    {
        std::lock_guard<std::mutex> lock(data_mutex_);
        current_threads_ = std::move(new_threads);
        collected_count = current_threads_.size();
    }

    // Clean up
    PmuDataFree(pmu_data);
    PmuDisable(pmu_fd_);

    // Update statistics
    {
        std::lock_guard<std::mutex> lock(stats_mutex_);
        stats_.collection_cycles++;
        stats_.last_update = std::chrono::steady_clock::now();
    }

    Utils::log_message("Collected metrics for " + std::to_string(collected_count) + " threads", config_.verbose);

    // Print the per-cycle summary while holding the lock so a concurrent
    // swap by the next cycle cannot invalidate the vector mid-print.
    if (config_.verbose && collected_count > 0) {
        std::lock_guard<std::mutex> lock(data_mutex_);
        print_collection_summary(current_threads_);
    }

    return true;
}

/**
 * Bind the most recently collected threads to cores, NUMA node by NUMA node,
 * using the strategy-specific hyperthread-aware binding.
 *
 * @return true when at least one NUMA node's binding pass succeeded.
 */
bool ThreadBindingManager::perform_thread_binding() {
    std::vector<ThreadMetrics> threads_to_bind;

    // Work on a snapshot so the collection thread can keep updating
    // current_threads_ while we bind.
    {
        std::lock_guard<std::mutex> lock(data_mutex_);
        threads_to_bind = current_threads_;
    }

    if (threads_to_bind.empty()) {
        Utils::log_warning("No threads available for binding");
        return false;
    }

    Utils::log_message("Performing thread binding for " + std::to_string(threads_to_bind.size()) + " threads", config_.verbose);

    // Print the thread distribution analysis before binding.
    if (config_.verbose) {
        print_binding_analysis(threads_to_bind, "BEFORE BINDING");
    }

    // Group threads by NUMA node first
    std::unordered_map<int, std::vector<ThreadMetrics>> numa_threads;
    for (const auto& thread : threads_to_bind) {
        if (thread.numa_id >= 0) {
            numa_threads[thread.numa_id].push_back(thread);
        } else {
            Utils::log_warning("Thread " + std::to_string(thread.tid) + " has invalid NUMA ID");
        }
    }

    if (numa_threads.empty()) {
        Utils::log_warning("No threads with valid NUMA information found");
        return false;
    }

    // Perform binding for each NUMA node with strategy-specific hyperthread-aware binding
    bool any_success = false;

    for (const auto& numa_pair : numa_threads) {
        int numa_id = numa_pair.first;
        std::vector<ThreadMetrics> numa_thread_list = numa_pair.second;

        Utils::log_message("Processing NUMA " + std::to_string(numa_id) + " with " +
                          std::to_string(numa_thread_list.size()) + " threads", config_.verbose);

        std::vector<int> available_cores = get_available_cores_for_numa(numa_id);
        if (available_cores.empty()) {
            Utils::log_warning("No available cores for NUMA " + std::to_string(numa_id));
            continue;
        }

        Utils::log_message("NUMA " + std::to_string(numa_id) + " has " +
                          std::to_string(available_cores.size()) + " available cores", config_.verbose);

        // Sort and bind according to the configured strategy.
        bool success = perform_strategy_specific_binding(numa_thread_list, available_cores, numa_id);
        if (success) {
            any_success = true;
        }
    }

    Utils::log_message("Binding completed with strategy-specific hyperthread-aware strategy", config_.verbose);

    return any_success;
}

/**
 * Dispatch one NUMA node's binding to the handler for the configured
 * strategy. Falls back to simple sequential binding when no physical-core
 * (hyperthread-pair) mapping can be derived, or for unknown strategies.
 */
bool ThreadBindingManager::perform_strategy_specific_binding(
    std::vector<ThreadMetrics>& threads,
    const std::vector<int>& available_cores,
    int numa_id) {

    if (threads.empty() || available_cores.empty()) {
        return false;
    }

    // Derive (hyperthread-a, hyperthread-b) pairs for each physical core.
    const std::vector<std::pair<int, int>> core_pairs = get_physical_core_mapping(available_cores);
    if (core_pairs.empty()) {
        Utils::log_warning("Failed to get physical core mapping for NUMA " + std::to_string(numa_id));
        return perform_simple_binding(threads, available_cores, numa_id);
    }

    Utils::log_message("NUMA " + std::to_string(numa_id) + ": " +
                      std::to_string(core_pairs.size()) + " physical cores available", config_.verbose);

    // Each strategy sorts/partitions the threads differently before the
    // shared complementary-binding step.
    if (config_.strategy == BindingStrategy::ACTIVITY_BASED) {
        return perform_activity_based_binding(threads, core_pairs, numa_id);
    }
    if (config_.strategy == BindingStrategy::MISS_RATIO_BASED) {
        return perform_miss_ratio_based_binding(threads, core_pairs, numa_id);
    }
    if (config_.strategy == BindingStrategy::HYBRID) {
        return perform_hybrid_based_binding(threads, core_pairs, numa_id);
    }
    return perform_simple_binding(threads, available_cores, numa_id);
}

bool ThreadBindingManager::perform_activity_based_binding(
    std::vector<ThreadMetrics>& threads, 
    const std::vector<std::pair<int, int>>& physical_cores, 
    int numa_id) {
    
    Utils::log_message("Using activity-based hyperthread-aware binding strategy", config_.verbose);
    
    // 按活跃度排序（高活跃度在前）
    std::sort(threads.begin(), threads.end());
    
    // 分离高活跃度和低活跃度线程
    std::vector<ThreadMetrics*> high_activity, low_activity;
    double activity_threshold = 0.5; // 活跃度阈值
    
    for (auto& thread : threads) {
        if (thread.activity_score > activity_threshold) {
            high_activity.push_back(&thread);
        } else {
            low_activity.push_back(&thread);
        }
    }
    
    Utils::log_message("NUMA " + std::to_string(numa_id) + ": " + 
                      std::to_string(high_activity.size()) + " high-activity, " +
                      std::to_string(low_activity.size()) + " low-activity threads", config_.verbose);
    
    return perform_complementary_binding(high_activity, low_activity, physical_cores, numa_id, "Activity");
}

bool ThreadBindingManager::perform_miss_ratio_based_binding(
    std::vector<ThreadMetrics>& threads, 
    const std::vector<std::pair<int, int>>& physical_cores, 
    int numa_id) {
    
    Utils::log_message("Using miss-ratio-based hyperthread-aware binding strategy", config_.verbose);
    
    // 按miss比例排序（高miss比例在前）
    std::sort(threads.begin(), threads.end(),
             [](const ThreadMetrics& a, const ThreadMetrics& b) {
                 return a.miss_ratio > b.miss_ratio;
             });
    
    // 分离高miss比例和低miss比例线程
    std::vector<ThreadMetrics*> high_miss, low_miss;
    double miss_threshold = 0.1; // 10% miss比例阈值
    
    for (auto& thread : threads) {
        if (thread.miss_ratio > miss_threshold) {
            high_miss.push_back(&thread);
        } else {
            low_miss.push_back(&thread);
        }
    }
    
    Utils::log_message("NUMA " + std::to_string(numa_id) + ": " + 
                      std::to_string(high_miss.size()) + " high-miss, " +
                      std::to_string(low_miss.size()) + " low-miss threads", config_.verbose);
    
    return perform_complementary_binding(high_miss, low_miss, physical_cores, numa_id, "Miss-Ratio");
}

bool ThreadBindingManager::perform_hybrid_based_binding(
    std::vector<ThreadMetrics>& threads, 
    const std::vector<std::pair<int, int>>& physical_cores, 
    int numa_id) {
    
    Utils::log_message("Using hybrid hyperthread-aware binding strategy", config_.verbose);
    
    // 按混合分数排序
    std::sort(threads.begin(), threads.end(),
             [](const ThreadMetrics& a, const ThreadMetrics& b) {
                 return (a.activity_score * 0.7 + a.miss_ratio * 0.3) >
                        (b.activity_score * 0.7 + b.miss_ratio * 0.3);
             });
    
    // 分离高混合分数和低混合分数线程
    std::vector<ThreadMetrics*> high_hybrid, low_hybrid;
    
    for (auto& thread : threads) {
        double hybrid_score = thread.activity_score * 0.7 + thread.miss_ratio * 0.3;
        if (hybrid_score > 0.4) { // 混合分数阈值
            high_hybrid.push_back(&thread);
        } else {
            low_hybrid.push_back(&thread);
        }
    }
    
    Utils::log_message("NUMA " + std::to_string(numa_id) + ": " + 
                      std::to_string(high_hybrid.size()) + " high-hybrid, " +
                      std::to_string(low_hybrid.size()) + " low-hybrid threads", config_.verbose);
    
    return perform_complementary_binding(high_hybrid, low_hybrid, physical_cores, numa_id, "Hybrid");
}

/**
 * Shared binding core for all hyperthread-aware strategies:
 *  - Step 1: pair the i-th thread of group1 with the i-th thread of group2
 *    on the two hyperthreads of the i-th physical core ("complementary"
 *    pairing, so a heavy and a light thread share one physical core).
 *  - Step 2: hand any unpaired threads to perform_overflow_binding().
 *
 * @param group1/group2     threads partitioned by the calling strategy
 * @param physical_cores    (hyperthread-a, hyperthread-b) pairs
 * @param strategy_name     label used in logs and thread descriptions
 * @return true when at least one thread was bound successfully.
 */
bool ThreadBindingManager::perform_complementary_binding(
    const std::vector<ThreadMetrics*>& group1, 
    const std::vector<ThreadMetrics*>& group2, 
    const std::vector<std::pair<int, int>>& physical_cores, 
    int numa_id, 
    const std::string& strategy_name) {
    
    int total_bindings = 0;
    int successful_bindings = 0;
    
    // Step 1: complementary binding — put one thread from each group on the
    // two hyperthreads of the same physical core.
    size_t pairs_to_bind = std::min(group1.size(), group2.size());
    size_t physical_cores_used = std::min(pairs_to_bind, physical_cores.size());
    
    Utils::log_message("Strategy " + strategy_name + ": Binding " + std::to_string(pairs_to_bind) + 
                      " complementary pairs to " + std::to_string(physical_cores_used) + " physical cores", config_.verbose);
    
    for (size_t i = 0; i < physical_cores_used; ++i) {
        const ThreadMetrics* thread1 = group1[i];
        const ThreadMetrics* thread2 = group2[i];
        
        int core1 = physical_cores[i].first;   // first hyperthread
        int core2 = physical_cores[i].second;  // second hyperthread
        
        // Bind the group1 thread to the first hyperthread.
        total_bindings++;
        bool success1 = bind_thread_to_core(thread1->tid, core1);
        if (success1) {
            successful_bindings++;
            Utils::log_message("Thread " + std::to_string(thread1->tid) + 
                             " (group1, " + get_thread_description(*thread1, strategy_name) + ") "
                             "bound to core " + std::to_string(core1), config_.verbose);
        }
        
        // Bind the group2 thread to the second hyperthread.
        total_bindings++;
        bool success2 = bind_thread_to_core(thread2->tid, core2);
        if (success2) {
            successful_bindings++;
            Utils::log_message("Thread " + std::to_string(thread2->tid) + 
                             " (group2, " + get_thread_description(*thread2, strategy_name) + ") "
                             "bound to core " + std::to_string(core2), config_.verbose);
        }
        
        update_statistics(success1);
        update_statistics(success2);
    }
    
    // Step 2: bind any threads left over after pairing.
    std::vector<const ThreadMetrics*> remaining_threads;
    
    // Unpaired group1 threads first...
    for (size_t i = physical_cores_used; i < group1.size(); ++i) {
        remaining_threads.push_back(group1[i]);
    }
    
    // ...then unpaired group2 threads.
    for (size_t i = physical_cores_used; i < group2.size(); ++i) {
        remaining_threads.push_back(group2[i]);
    }
    
    if (!remaining_threads.empty()) {
        Utils::log_message("Strategy " + strategy_name + ": " + std::to_string(remaining_threads.size()) + 
                          " remaining threads to bind", config_.verbose);
        
        // Overflow binding packs leftovers onto spare hyperthreads.
        bool success = perform_overflow_binding(remaining_threads, physical_cores, physical_cores_used, 
                                               numa_id, strategy_name, total_bindings, successful_bindings);
        if (!success) {
            Utils::log_warning("Strategy " + strategy_name + ": Some threads could not be bound due to insufficient cores");
        }
    }
    
    Utils::log_message("Strategy " + strategy_name + " binding completed: " + std::to_string(successful_bindings) +
                      "/" + std::to_string(total_bindings) + " successful", config_.verbose);
    
    return successful_bindings > 0;
}

/**
 * Bind leftover threads (those not placed by the complementary pairing) in
 * three phases of decreasing preference:
 *   Phase 1: first hyperthread of each still-unused physical core.
 *   Phase 2: second hyperthread of cores already used by pairing — note this
 *            co-schedules two threads on the same hyperthread as a paired
 *            group2 thread; appears intentional (best effort under pressure).
 *   Phase 3: second hyperthread of the cores claimed in phase 1.
 * Threads that still do not fit are reported and left unbound.
 *
 * @param cores_used  number of physical cores consumed by pairing (step 1)
 * @param total_bindings/successful_bindings  caller-owned running counters
 * @return true when every remaining thread was given a core, else false.
 */
bool ThreadBindingManager::perform_overflow_binding(
    const std::vector<const ThreadMetrics*>& remaining_threads,
    const std::vector<std::pair<int, int>>& physical_cores,
    size_t cores_used,
    int numa_id,
    const std::string& strategy_name,
    int& total_bindings,
    int& successful_bindings) {
    
    if (remaining_threads.empty()) {
        return true;
    }
    
    Utils::log_message("Performing overflow binding for " + std::to_string(remaining_threads.size()) + 
                      " threads with " + std::to_string(physical_cores.size() - cores_used) + " remaining cores", config_.verbose);
    
    // Phase 1: prefer unused physical cores (one thread per physical core).
    size_t remaining_cores = physical_cores.size() - cores_used;
    size_t threads_to_bind_phase1 = std::min(remaining_threads.size(), remaining_cores);
    
    for (size_t i = 0; i < threads_to_bind_phase1; ++i) {
        const ThreadMetrics* thread = remaining_threads[i];
        int core = physical_cores[cores_used + i].first;  // first hyperthread
        
        total_bindings++;
        bool success = bind_thread_to_core(thread->tid, core);
        if (success) {
            successful_bindings++;
            Utils::log_message("Thread " + std::to_string(thread->tid) + 
                             " (overflow-phase1, " + get_thread_description(*thread, strategy_name) + ") "
                             "bound to core " + std::to_string(core), config_.verbose);
        }
        update_statistics(success);
    }
    
    // Phase 2: if threads remain, reuse the second hyperthreads of the cores
    // already occupied by complementary pairs.
    if (threads_to_bind_phase1 < remaining_threads.size()) {
        size_t remaining_threads_phase2 = remaining_threads.size() - threads_to_bind_phase1;
        size_t available_second_cores = std::min(remaining_threads_phase2, cores_used);
        
        Utils::log_message("Phase 2: Binding " + std::to_string(remaining_threads_phase2) + 
                          " threads to second hyperthreads of " + std::to_string(available_second_cores) + " cores", config_.verbose);
        
        for (size_t i = 0; i < available_second_cores; ++i) {
            const ThreadMetrics* thread = remaining_threads[threads_to_bind_phase1 + i];
            int core = physical_cores[i].second;  // second hyperthread
            
            total_bindings++;
            bool success = bind_thread_to_core(thread->tid, core);
            if (success) {
                successful_bindings++;
                Utils::log_message("Thread " + std::to_string(thread->tid) + 
                                 " (overflow-phase2, " + get_thread_description(*thread, strategy_name) + ") "
                                 "bound to second hyperthread of core " + std::to_string(core), config_.verbose);
            }
            update_statistics(success);
        }
        
        // Phase 3: if threads still remain, use the second hyperthreads of
        // the physical cores claimed in phase 1.
        size_t threads_bound_phase2 = available_second_cores;
        if (threads_bound_phase2 < remaining_threads_phase2) {
            size_t remaining_threads_phase3 = remaining_threads_phase2 - threads_bound_phase2;
            size_t available_remaining_second_cores = std::min(remaining_threads_phase3, 
                                                              physical_cores.size() - cores_used);
            
            Utils::log_message("Phase 3: Binding " + std::to_string(remaining_threads_phase3) + 
                              " threads to remaining second hyperthreads", config_.verbose);
            
            for (size_t i = 0; i < available_remaining_second_cores; ++i) {
                const ThreadMetrics* thread = remaining_threads[threads_to_bind_phase1 + threads_bound_phase2 + i];
                int core = physical_cores[cores_used + i].second;  // spare second hyperthread
                
                total_bindings++;
                bool success = bind_thread_to_core(thread->tid, core);
                if (success) {
                    successful_bindings++;
                    Utils::log_message("Thread " + std::to_string(thread->tid) + 
                                     " (overflow-phase3, " + get_thread_description(*thread, strategy_name) + ") "
                                     "bound to second hyperthread of core " + std::to_string(core), config_.verbose);
                }
                update_statistics(success);
            }
            
            // Anything past phase 3 cannot be placed: report and fail.
            size_t total_bound = threads_to_bind_phase1 + threads_bound_phase2 + available_remaining_second_cores;
            if (total_bound < remaining_threads.size()) {
                size_t unbound_count = remaining_threads.size() - total_bound;
                Utils::log_warning("Strategy " + strategy_name + ": " + std::to_string(unbound_count) + 
                                 " threads could not be bound due to insufficient CPU cores");
                
                // List the threads that were left unbound.
                for (size_t i = total_bound; i < remaining_threads.size(); ++i) {
                    const ThreadMetrics* thread = remaining_threads[i];
                    Utils::log_message("Unbound thread: " + std::to_string(thread->tid) + 
                                     " (" + get_thread_description(*thread, strategy_name) + ")", config_.verbose);
                }
                
                return false;
            }
        }
    }
    
    return true;
}

/**
 * Format a short per-thread metric label for log lines, chosen by strategy:
 * "activity=0.123", "miss=12.3%", or "hybrid=0.123". Unknown strategy names
 * yield an empty string.
 */
std::string ThreadBindingManager::get_thread_description(const ThreadMetrics& thread, const std::string& strategy_name) {
    std::ostringstream out;

    if (strategy_name == "Activity") {
        out << "activity=" << std::fixed << std::setprecision(3) << thread.activity_score;
        return out.str();
    }
    if (strategy_name == "Miss-Ratio") {
        out << "miss=" << std::fixed << std::setprecision(1) << (thread.miss_ratio * 100) << "%";
        return out.str();
    }
    if (strategy_name == "Hybrid") {
        const double score = thread.activity_score * 0.7 + thread.miss_ratio * 0.3;
        out << "hybrid=" << std::fixed << std::setprecision(3) << score;
        return out.str();
    }

    return std::string{};
}

bool ThreadBindingManager::perform_simple_binding(
    const std::vector<ThreadMetrics>& threads, 
    const std::vector<int>& available_cores, 
    int numa_id) {
    
    // Sequential one-thread-per-core binding (the original, pre-strategy
    // logic).  Only as many threads as there are available cores are bound;
    // any surplus threads are simply left untouched.
    const size_t bind_count = std::min(threads.size(), available_cores.size());
    int attempted = 0;
    int succeeded = 0;

    for (size_t idx = 0; idx < bind_count; ++idx) {
        const ThreadMetrics& thread = threads[idx];
        // idx is already bounded by available_cores.size(), so the modulo
        // never wraps here — kept for defensive parity with the original.
        const int core_id = available_cores[idx % available_cores.size()];

        ++attempted;
        const bool bound = bind_thread_to_core(thread.tid, core_id);
        update_statistics(bound);

        if (bound) {
            ++succeeded;
            Utils::log_message("Thread " + std::to_string(thread.tid) +
                             " (" + thread.comm + ") bound to core " +
                             std::to_string(core_id) + " (NUMA " +
                             std::to_string(numa_id) + ")", config_.verbose);
        } else {
            Utils::log_warning("Failed to bind thread " + std::to_string(thread.tid) +
                             " to core " + std::to_string(core_id));
        }
    }

    Utils::log_message("Simple binding completed: " + std::to_string(succeeded) +
                      "/" + std::to_string(attempted) + " successful", config_.verbose);

    // True as soon as at least one binding landed.
    return succeeded > 0;
}

std::vector<std::pair<int, int>> ThreadBindingManager::get_physical_core_mapping(const std::vector<int>& available_cores) {
    // Maps available logical cores onto physical-core pairs, assuming the
    // common SMT layout where logical cores (2k, 2k+1) are the two
    // hyperthreads of physical core k — i.e. pairs (0,1), (2,3), (4,5), ...
    // NOTE(review): this layout is an assumption; confirm against
    // /sys/devices/system/cpu/cpuN/topology/thread_siblings_list on targets.
    std::vector<std::pair<int, int>> physical_cores;

    // Fix: the previous implementation paired the i-th even core with the
    // i-th odd core, which joins hyperthreads of *different* physical cores
    // whenever the available set has gaps (e.g. {0, 2, 3} produced the bogus
    // pair (0, 3)).  Instead, pair core c with its true sibling c+1 only when
    // both are actually available.
    std::unordered_set<int> core_set(available_cores.begin(), available_cores.end());
    for (int core : available_cores) {
        if (core % 2 == 0 && core_set.count(core + 1) != 0) {
            physical_cores.emplace_back(core, core + 1);
        }
    }

    Utils::log_message("Created " + std::to_string(physical_cores.size()) + " physical core pairs", config_.verbose);

    return physical_cores;
}

void ThreadBindingManager::binding_worker_thread() {
    // Runs binding cycles on a fixed interval until stop is requested.  The
    // wait below is interruptible: a stop request signalled through cv_
    // wakes the thread immediately instead of waiting out the full interval.
    Utils::log_message("Binding worker thread started", config_.verbose);

    for (;;) {
        if (should_stop_.load()) {
            break;
        }

        if (perform_thread_binding()) {
            Utils::log_message("Thread binding cycle completed", config_.verbose);
        }

        // Sleep until the next cycle, or until should_stop_ becomes true.
        std::unique_lock<std::mutex> lock(data_mutex_);
        cv_.wait_for(lock, std::chrono::milliseconds(config_.binding_interval_ms),
                    [this] { return should_stop_.load(); });
    }

    Utils::log_message("Binding worker thread stopped", config_.verbose);
}

void ThreadBindingManager::collection_worker_thread() {
    // Samples thread metrics on a fixed interval until stop is requested.
    Utils::log_message("Collection worker thread started", config_.verbose);

    for (;;) {
        if (should_stop_.load()) {
            break;
        }

        const bool collected = collect_thread_metrics();
        // With continuous binding enabled, fresh metrics should kick off a
        // binding cycle right away rather than waiting for its own timer.
        if (collected && config_.enable_continuous_binding) {
            cv_.notify_one();
        }

        // Sleep until the next sample, or until should_stop_ becomes true.
        std::unique_lock<std::mutex> lock(data_mutex_);
        cv_.wait_for(lock, std::chrono::milliseconds(config_.collection_interval_ms),
                    [this] { return should_stop_.load(); });
    }

    Utils::log_message("Collection worker thread stopped", config_.verbose);
}

std::vector<int> ThreadBindingManager::get_available_cores_for_numa(int numa_id) {
    std::vector<int> available_cores;

    for (const auto& numa_node : numa_nodes_) {
        if (numa_node.numa_id == numa_id) {
            available_cores = numa_node.available_cores;
            break;
        }
    }

    return available_cores;
}

// Thin member wrapper delegating the actual affinity call to
// Utils::bind_thread_to_core; returns its success flag unchanged.
bool ThreadBindingManager::bind_thread_to_core(int tid, int core_id) {
    return Utils::bind_thread_to_core(tid, core_id);
}

std::vector<ThreadMetrics> ThreadBindingManager::find_threads_by_keyword(const std::string& keyword) {
    std::vector<ThreadMetrics> threads;
    std::vector<int> pids = Utils::find_processes_by_keyword(keyword);
    
    // 获取当前进程ID，排除当前绑核程序
    int current_pid = getpid();
    
    for (int pid : pids) {
        // 排除当前绑核程序进程
        if (pid == current_pid) {
            Utils::log_message("Excluding current binding process (PID: " + std::to_string(pid) + ")", config_.verbose);
            continue;
        }
        
        // 获取进程下的所有线程
        std::vector<int> tids = Utils::get_process_threads(pid);
        std::string process_name = Utils::get_process_name(pid);
        
        if (tids.empty()) {
            Utils::log_warning("No threads found for process " + std::to_string(pid) + " (" + process_name + ")");
            continue;
        }
        
        Utils::log_message("Found " + std::to_string(tids.size()) + " threads in process " + 
                          std::to_string(pid) + " (" + process_name + ")", config_.verbose);
        
        // 为每个线程创建ThreadMetrics对象
        for (int tid : tids) {
            ThreadMetrics thread(pid, static_cast<unsigned>(tid), 0, 0, process_name);
            threads.push_back(thread);
        }
    }
    
    Utils::log_message("Total threads found: " + std::to_string(threads.size()), config_.verbose);
    return threads;
}

void ThreadBindingManager::update_statistics(bool binding_success) {
    // Stats are shared with other threads, so all updates go through the lock.
    const std::lock_guard<std::mutex> guard(stats_mutex_);
    ++stats_.total_bindings;
    // Route the outcome into exactly one of the success/failure counters.
    auto& outcome_counter = binding_success ? stats_.successful_bindings
                                            : stats_.failed_bindings;
    ++outcome_counter;
}

// Prints a multi-section, human-readable console report for one collection
// cycle: overall totals, per-NUMA aggregates, an activity-score histogram,
// and the five most active threads.  Read-only: no manager state is modified.
// NOTE(review): std::setw only applies to the next inserted item, so in the
// "Range" rows the width is consumed by the "[" bracket alone — the columns
// come out slightly ragged; confirm whether that layout is intentional.
void ThreadBindingManager::print_collection_summary(const std::vector<ThreadMetrics>& threads) {
    if (threads.empty()) return;

    std::cout << "\n" << std::string(100, '=') << std::endl;
    std::cout << "COLLECTION SUMMARY - " << threads.size() << " Threads" << std::endl;
    std::cout << std::string(100, '=') << std::endl;

    // Compute overall statistics across all threads.
    uint64_t total_llc_miss = 0;
    uint64_t total_llc_cache = 0;
    double total_activity_score = 0.0;
    double max_activity_score = 0.0;
    // Min trackers are seeded at 1.0 — assumes scores/ratios lie in [0,1]
    // (calculateActivityScore's weighted clamped terms sum to at most 1.0).
    double min_activity_score = 1.0;
    double max_miss_ratio = 0.0;
    double min_miss_ratio = 1.0;

    // Threads grouped by NUMA node; threads with unknown NUMA (numa_id < 0)
    // are excluded from the per-NUMA table below.
    std::unordered_map<int, std::vector<const ThreadMetrics*>> numa_threads;

    for (const auto& thread : threads) {
        total_llc_miss += thread.llc_miss;
        total_llc_cache += thread.llc_cache;
        total_activity_score += thread.activity_score;

        max_activity_score = std::max(max_activity_score, thread.activity_score);
        min_activity_score = std::min(min_activity_score, thread.activity_score);
        max_miss_ratio = std::max(max_miss_ratio, thread.miss_ratio);
        min_miss_ratio = std::min(min_miss_ratio, thread.miss_ratio);

        if (thread.numa_id >= 0) {
            numa_threads[thread.numa_id].push_back(&thread);
        }
    }

    double avg_activity_score = total_activity_score / threads.size();
    // Guard against division by zero when no cache accesses were sampled.
    double overall_miss_ratio = (total_llc_cache != 0) ?
        static_cast<double>(total_llc_miss) / total_llc_cache : 0.0;

    // Overall statistics table.
    std::cout << "\nOVERALL STATISTICS:" << std::endl;
    std::cout << std::string(60, '-') << std::endl;
    std::cout << std::setw(25) << std::left << "Metric"
             << std::setw(35) << "Value" << std::endl;
    std::cout << std::string(60, '-') << std::endl;
    std::cout << std::setw(25) << std::left << "Total LLC Miss"
             << std::setw(35) << total_llc_miss << std::endl;
    std::cout << std::setw(25) << std::left << "Total LLC Cache Access"
             << std::setw(35) << total_llc_cache << std::endl;
    std::cout << std::setw(25) << std::left << "Overall LLC Miss Ratio"
             << std::setw(35) << std::fixed << std::setprecision(4)
             << (overall_miss_ratio * 100.0) << "%" << std::endl;
    std::cout << std::setw(25) << std::left << "Average Activity Score"
             << std::setw(35) << std::fixed << std::setprecision(4)
             << avg_activity_score << std::endl;
    std::cout << std::setw(25) << std::left << "Activity Score Range"
             << std::setw(35) << std::fixed << std::setprecision(4)
             << "[" << min_activity_score << ", " << max_activity_score << "]" << std::endl;
    std::cout << std::setw(25) << std::left << "Miss Ratio Range"
             << std::setw(35) << std::fixed << std::setprecision(4)
             << "[" << (min_miss_ratio * 100.0) << "%, " << (max_miss_ratio * 100.0) << "%]" << std::endl;

    // Per-NUMA statistics table.
    std::cout << "\nNUMA-BASED STATISTICS:" << std::endl;
    std::cout << std::string(80, '-') << std::endl;
    std::cout << std::setw(8) << "NUMA"
             << std::setw(8) << "Threads"
             << std::setw(12) << "LLC Miss"
             << std::setw(12) << "LLC Cache"
             << std::setw(12) << "Miss %"
             << std::setw(12) << "Avg Activity"
             << std::setw(16) << "Activity Range" << std::endl;
    std::cout << std::string(80, '-') << std::endl;

    // Note: unordered_map iteration order is unspecified, so NUMA rows may
    // appear in any order across runs.
    for (const auto& numa_pair : numa_threads) {
        int numa_id = numa_pair.first;
        const std::vector<const ThreadMetrics*>& numa_thread_list = numa_pair.second;

        uint64_t numa_llc_miss = 0;
        uint64_t numa_llc_cache = 0;
        double numa_activity_score = 0.0;
        double numa_max_activity = 0.0;
        double numa_min_activity = 1.0;

        for (const auto* thread : numa_thread_list) {
            numa_llc_miss += thread->llc_miss;
            numa_llc_cache += thread->llc_cache;
            numa_activity_score += thread->activity_score;
            numa_max_activity = std::max(numa_max_activity, thread->activity_score);
            numa_min_activity = std::min(numa_min_activity, thread->activity_score);
        }

        double numa_miss_ratio = (numa_llc_cache != 0) ?
            static_cast<double>(numa_llc_miss) / numa_llc_cache : 0.0;
        // numa_thread_list is never empty: entries exist only when a thread
        // was pushed into the group above.
        double numa_avg_activity = numa_activity_score / numa_thread_list.size();

        std::cout << std::setw(8) << numa_id
                 << std::setw(8) << numa_thread_list.size()
                 << std::setw(12) << numa_llc_miss
                 << std::setw(12) << numa_llc_cache
                 << std::setw(12) << std::fixed << std::setprecision(2)
                 << (numa_miss_ratio * 100.0) << "%"
                 << std::setw(12) << std::fixed << std::setprecision(4)
                 << numa_avg_activity
                 << std::setw(16) << std::fixed << std::setprecision(4)
                 << "[" << numa_min_activity << "," << numa_max_activity << "]" << std::endl;
    }

    // Activity-score distribution table (three fixed buckets).
    std::cout << "\nACTIVITY SCORE DISTRIBUTION:" << std::endl;
    std::cout << std::string(50, '-') << std::endl;
    std::cout << std::setw(20) << "Activity Level"
             << std::setw(10) << "Count"
             << std::setw(10) << "Percentage" << std::endl;
    std::cout << std::string(50, '-') << std::endl;

    int high_activity = 0;    // > 0.7
    int medium_activity = 0;  // 0.3-0.7
    int low_activity = 0;     // < 0.3

    for (const auto& thread : threads) {
        if (thread.activity_score > 0.7) {
            high_activity++;
        } else if (thread.activity_score > 0.3) {
            medium_activity++;
        } else {
            low_activity++;
        }
    }

    std::cout << std::setw(20) << std::left << "High (>0.7)"
             << std::setw(10) << high_activity
             << std::setw(10) << std::fixed << std::setprecision(1)
             << (static_cast<double>(high_activity) / threads.size() * 100.0) << "%" << std::endl;
    std::cout << std::setw(20) << std::left << "Medium (0.3-0.7)"
             << std::setw(10) << medium_activity
             << std::setw(10) << std::fixed << std::setprecision(1)
             << (static_cast<double>(medium_activity) / threads.size() * 100.0) << "%" << std::endl;
    std::cout << std::setw(20) << std::left << "Low (<0.3)"
             << std::setw(10) << low_activity
             << std::setw(10) << std::fixed << std::setprecision(1)
             << (static_cast<double>(low_activity) / threads.size() * 100.0) << "%" << std::endl;

    // Top-5 most active threads table (sorted by activity score, descending).
    std::cout << "\nTOP 5 MOST ACTIVE THREADS:" << std::endl;
    std::cout << std::string(80, '-') << std::endl;
    std::cout << std::setw(4) << "Rank"
             << std::setw(8) << "TID"
             << std::setw(16) << "Process"
             << std::setw(12) << "Activity"
             << std::setw(12) << "Miss %"
             << std::setw(12) << "LLC Miss"
             << std::setw(8) << "NUMA" << std::endl;
    std::cout << std::string(80, '-') << std::endl;

    // Sort pointers rather than copying ThreadMetrics values.
    std::vector<const ThreadMetrics*> sorted_threads;
    for (const auto& thread : threads) {
        sorted_threads.push_back(&thread);
    }

    std::sort(sorted_threads.begin(), sorted_threads.end(),
              [](const ThreadMetrics* a, const ThreadMetrics* b) {
                  return a->activity_score > b->activity_score;
              });

    for (size_t i = 0; i < std::min(size_t(5), sorted_threads.size()); ++i) {
        const auto* thread = sorted_threads[i];
        // Truncate long process names to keep the 16-char column aligned.
        std::string process_name = thread->comm.empty() ? "unknown" : thread->comm;
        if (process_name.length() > 14) {
            process_name = process_name.substr(0, 11) + "...";
        }

        std::cout << std::setw(4) << (i + 1)
                 << std::setw(8) << thread->tid
                 << std::setw(16) << std::left << process_name << std::right
                 << std::setw(12) << std::fixed << std::setprecision(4) << thread->activity_score
                 << std::setw(12) << std::fixed << std::setprecision(2)
                 << (thread->miss_ratio * 100.0) << "%"
                 << std::setw(12) << thread->llc_miss
                 << std::setw(8) << thread->numa_id << std::endl;
    }

    std::cout << std::string(100, '=') << std::endl << std::endl;
}

// Prints a pre-binding analysis report: thread distribution per NUMA node,
// the currently configured strategy, and the first three candidate threads.
// Read-only: no manager state is modified.
// NOTE(review): the "TOP 3" section indexes threads[0..2] directly — it
// relies on the caller having already sorted `threads` by the active
// strategy; confirm at the call site.
void ThreadBindingManager::print_binding_analysis(const std::vector<ThreadMetrics>& threads, const std::string& analysis_type) {
    if (threads.empty()) return;

    std::cout << "\n" << std::string(80, '=') << std::endl;
    std::cout << "BINDING ANALYSIS - " << analysis_type << std::endl;
    std::cout << std::string(80, '=') << std::endl;

    // Group threads by NUMA node; threads with unknown NUMA (numa_id < 0)
    // are excluded from the distribution table.
    std::unordered_map<int, std::vector<const ThreadMetrics*>> numa_threads;
    for (const auto& thread : threads) {
        if (thread.numa_id >= 0) {
            numa_threads[thread.numa_id].push_back(&thread);
        }
    }

    // Per-NUMA distribution table.
    std::cout << "\nNUMA-BASED THREAD DISTRIBUTION:" << std::endl;
    std::cout << std::string(80, '-') << std::endl;
    std::cout << std::setw(8) << "NUMA"
             << std::setw(8) << "Threads"
             << std::setw(12) << "Avg Activity"
             << std::setw(16) << "Activity Range"
             << std::setw(12) << "Avg Miss %"
             << std::setw(16) << "Most Active TID" << std::endl;
    std::cout << std::string(80, '-') << std::endl;

    // Note: unordered_map iteration order is unspecified, so NUMA rows may
    // appear in any order across runs.
    for (const auto& numa_pair : numa_threads) {
        int numa_id = numa_pair.first;
        const std::vector<const ThreadMetrics*>& numa_thread_list = numa_pair.second;

        // Aggregate this NUMA node's statistics.
        double total_activity = 0.0;
        double total_miss_ratio = 0.0;
        double max_activity = 0.0;
        // Seeded at 1.0 — assumes activity scores lie in [0,1]
        // (see ThreadMetrics::calculateActivityScore).
        double min_activity = 1.0;
        const ThreadMetrics* most_active_thread = nullptr;

        for (const auto* thread : numa_thread_list) {
            total_activity += thread->activity_score;
            total_miss_ratio += thread->miss_ratio;
            max_activity = std::max(max_activity, thread->activity_score);
            min_activity = std::min(min_activity, thread->activity_score);

            if (!most_active_thread || thread->activity_score > most_active_thread->activity_score) {
                most_active_thread = thread;
            }
        }

        // numa_thread_list is never empty: entries exist only when a thread
        // was pushed into the group above.
        double avg_activity = total_activity / numa_thread_list.size();
        double avg_miss_ratio = total_miss_ratio / numa_thread_list.size();

        std::cout << std::setw(8) << numa_id
                 << std::setw(8) << numa_thread_list.size()
                 << std::setw(12) << std::fixed << std::setprecision(4) << avg_activity
                 << std::setw(16) << std::fixed << std::setprecision(4)
                 << "[" << min_activity << "," << max_activity << "]"
                 << std::setw(12) << std::fixed << std::setprecision(2)
                 << (avg_miss_ratio * 100.0) << "%";

        if (most_active_thread) {
            std::cout << std::setw(16) << most_active_thread->tid;
        } else {
            std::cout << std::setw(16) << "N/A";
        }
        std::cout << std::endl;
    }

    // Strategy-effectiveness table (currently just names the active strategy).
    std::cout << "\nSTRATEGY EFFECTIVENESS ANALYSIS:" << std::endl;
    std::cout << std::string(60, '-') << std::endl;
    std::cout << std::setw(25) << std::left << "Current Strategy"
             << std::setw(35) << (config_.strategy == BindingStrategy::ACTIVITY_BASED ? "Activity-Based" :
                                 config_.strategy == BindingStrategy::MISS_RATIO_BASED ? "Miss-Ratio-Based" : "Hybrid") << std::endl;
    std::cout << std::string(60, '-') << std::endl;

    // Table of the first three threads that will be bound.
    std::cout << "\nTOP 3 THREADS TO BE BOUND:" << std::endl;
    std::cout << std::string(70, '-') << std::endl;
    std::cout << std::setw(4) << "Rank"
             << std::setw(8) << "TID"
             << std::setw(16) << "Process"
             << std::setw(12) << "Activity"
             << std::setw(12) << "Miss %"
             << std::setw(8) << "NUMA" << std::endl;
    std::cout << std::string(70, '-') << std::endl;

    for (size_t i = 0; i < std::min(size_t(3), threads.size()); ++i) {
        const auto* thread = &threads[i]; // first 3 of the (assumed pre-sorted) input
        // Truncate long process names to keep the 16-char column aligned.
        std::string process_name = thread->comm.empty() ? "unknown" : thread->comm;
        if (process_name.length() > 14) {
            process_name = process_name.substr(0, 11) + "...";
        }

        std::cout << std::setw(4) << (i + 1)
                 << std::setw(8) << thread->tid
                 << std::setw(16) << std::left << process_name << std::right
                 << std::setw(12) << std::fixed << std::setprecision(4) << thread->activity_score
                 << std::setw(12) << std::fixed << std::setprecision(2)
                 << (thread->miss_ratio * 100.0) << "%"
                 << std::setw(8) << thread->numa_id << std::endl;
    }

    std::cout << std::string(80, '=') << std::endl << std::endl;
}

} // namespace ThreadBinding