/******************************************************************************
 * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 * libkperf licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 * Author:
 * Create: 2025-05-13
 * Description: Hyperthread affinity analysis and binding based on PMU metrics
 ******************************************************************************/
#include <iostream>
#include <vector>
#include <algorithm>
#include <string>
#include <map>
#include <sstream>
#include <fstream>
#include <cstring>
#include <iomanip>
#include <unistd.h>
#include <unordered_map>
#include <unordered_set>
#include <signal.h>
#include <chrono>
#include <pthread.h>
#include <sched.h>
#include <sys/types.h>
#include <dirent.h>
#include <cmath>
#include <numeric>
#include "pcerrc.h"
#include "pmu.h"
#include "symbol.h"

// Forward declarations for functions used before their definitions
int getCpuCore(int pid);
std::vector<int> parseCpuRange(const std::string &rangeStr);
std::string getCpuAffinityList(int pid);
bool hasCommonCpu(const unsigned *cpuArray, size_t arraySize, const std::vector<int> &cpuVector);
std::string GetL3CMissPercent(unsigned llc_miss, unsigned llc_cache);
std::vector<int> findProcessesByKeyword(const std::string& keyword);
void totalDDRCBandwidth();
void initNumaToCoreList();

// NUMA node id -> total DDR read+write bandwidth in MB/s (filled by totalDDRCBandwidth).
static std::map<unsigned, double> numaTotalDDRC;
// NUMA node id -> array of CPU core ids on that node (filled by initNumaToCoreList).
static std::unordered_map<unsigned, unsigned*> numaToCpuCore;
// NUMA node id -> number of entries in the matching numaToCpuCore array.
static std::unordered_map<unsigned, unsigned> numaToCpuNumber;
// CPU ids the most recently queried PID is allowed to run on (side effect of getCpuCore).
static std::vector<int> pidBoundCpus;
// Number of NUMA nodes observed (set by totalDDRCBandwidth).
static unsigned numaNum = 0;

const int FLOAT_PRECISION = 2;    // decimal places used when formatting percentages
const int TIME_UNIT_TRANS = 1000; // ms <-> s conversion factor

// Command-line parameters (see print_usage): top-N threads to analyze,
// total PMU collect time in seconds, collect period in milliseconds.
uint64_t topNum = 0;
uint64_t duration = 0;
uint64_t period = 0;

// Enhanced thread metrics for hyperthread affinity analysis
// Enhanced thread metrics for hyperthread affinity analysis.
// Raw PMU counts are accumulated externally; calculateMetrics() derives
// normalized [0,1] scores, and calculateAffinityWith() rates how well two
// threads would share one physical core.
struct ThreadAffinityMetrics {
    int pid;                    // Process ID
    unsigned tid;               // Thread ID
    std::string comm;           // Process name

    // PMU metrics (raw event counts over the sampling window)
    uint64_t cpu_cycles;        // CPU cycles
    uint64_t instructions;      // Instructions retired
    uint64_t llc_misses;        // LLC misses
    uint64_t llc_references;    // LLC references
    uint64_t l1_misses;         // L1 cache misses
    uint64_t l1_references;     // L1 cache references
    uint64_t l2_misses;         // L2 cache misses
    uint64_t l2_references;     // L2 cache references
    uint64_t branch_misses;     // Branch mispredictions
    uint64_t branches;          // Branch instructions

    // Derived metrics, each in [0, 1] after calculateMetrics()
    double cpu_intensity;       // CPU intensive score (CPI-based)
    double cache_pressure;      // Average miss rate across L1/L2/LLC
    double cache_friendliness;  // Average hit rate across L1/L2/LLC
    double temporal_pattern;    // Branch prediction accuracy
    double affinity_score;      // Overall affinity score for pairing

    ThreadAffinityMetrics()
        : pid(0), tid(0), comm(""), cpu_cycles(0), instructions(0),
          llc_misses(0), llc_references(0), l1_misses(0), l1_references(0),
          l2_misses(0), l2_references(0), branch_misses(0), branches(0),
          cpu_intensity(0.0), cache_pressure(0.0), cache_friendliness(0.0),
          temporal_pattern(0.0), affinity_score(0.0) {}

    ThreadAffinityMetrics(int p, unsigned t, const std::string& process_name = "")
        : pid(p), tid(t), comm(process_name), cpu_cycles(0), instructions(0),
          llc_misses(0), llc_references(0), l1_misses(0), l1_references(0),
          l2_misses(0), l2_references(0), branch_misses(0), branches(0),
          cpu_intensity(0.0), cache_pressure(0.0), cache_friendliness(0.0),
          temporal_pattern(0.0), affinity_score(0.0) {}

    // Calculate derived metrics from PMU data.
    // Metrics whose denominators are zero are left at their default 0.0.
    void calculateMetrics() {
        // CPU intensity: based on CPI (Cycles Per Instruction),
        // capped so that CPI >= 2 maps to a full score of 1.0.
        if (instructions > 0) {
            double cpi = static_cast<double>(cpu_cycles) / instructions;
            cpu_intensity = std::min(cpi / 2.0, 1.0); // Normalize to 0-1
        }

        // Cache pressure: based on cache miss rates across all levels
        if (l1_references > 0 && l2_references > 0 && llc_references > 0) {
            double l1_miss_rate = static_cast<double>(l1_misses) / l1_references;
            double l2_miss_rate = static_cast<double>(l2_misses) / l2_references;
            double l3_miss_rate = static_cast<double>(llc_misses) / llc_references;
            cache_pressure = (l1_miss_rate + l2_miss_rate + l3_miss_rate) / 3.0;
        }

        // Cache friendliness: based on cache hit rates
        if (l1_references > 0 && l2_references > 0 && llc_references > 0) {
            double l1_hit_rate = 1.0 - static_cast<double>(l1_misses) / l1_references;
            double l2_hit_rate = 1.0 - static_cast<double>(l2_misses) / l2_references;
            double l3_hit_rate = 1.0 - static_cast<double>(llc_misses) / llc_references;
            cache_friendliness = (l1_hit_rate + l2_hit_rate + l3_hit_rate) / 3.0;
        }

        // Temporal pattern: based on branch prediction accuracy
        if (branches > 0) {
            double branch_accuracy = 1.0 - static_cast<double>(branch_misses) / branches;
            temporal_pattern = branch_accuracy;
        }
    }

    // Calculate affinity score with another thread. Higher means better
    // suited to share a physical core: complementary (dissimilar) demands
    // minimize contention on the shared pipeline and caches, so each term
    // rewards the DIFFERENCE between the two threads' scores.
    // (Previously computed 1.0 - |diff|, which rewarded identical behavior
    // and contradicted the pairing goal stated in the comments below.)
    double calculateAffinityWith(const ThreadAffinityMetrics& other) const {
        // CPU complementarity: one high CPU + one low CPU = good
        double cpu_complementarity = std::abs(cpu_intensity - other.cpu_intensity);

        // Cache pressure complementarity: one high cache pressure + one low cache pressure = good
        double cache_pressure_complementarity = std::abs(cache_pressure - other.cache_pressure);

        // Cache complementarity: different cache behaviors = good
        double cache_complementarity = std::abs(cache_friendliness - other.cache_friendliness);

        // Temporal complementarity: different temporal patterns = good
        double temporal_complementarity = std::abs(temporal_pattern - other.temporal_pattern);

        // Weighted affinity score
        return cpu_complementarity * 0.4 +
               cache_pressure_complementarity * 0.3 +
               cache_complementarity * 0.2 +
               temporal_complementarity * 0.1;
    }

    // Get thread type for classification (checked in priority order).
    std::string getThreadType() const {
        if (cpu_intensity > 0.7) return "CPU-Intensive";
        if (cache_pressure > 0.7) return "Cache-Pressure";
        if (cache_friendliness > 0.8) return "Cache-Friendly";
        if (temporal_pattern > 0.8) return "Temporal-Regular";
        return "Mixed";
    }
};

// Get system CPU core count
// Get system CPU core count (logical CPUs currently online).
// Falls back to the historical hard-coded value of 80 if the query fails.
int getSystemCpuCount() {
    long n = sysconf(_SC_NPROCESSORS_ONLN);
    return (n > 0) ? static_cast<int>(n) : 80;
}

// Get physical core mapping for hyperthreading
// Get physical core mapping for hyperthreading.
// Assumes logical CPUs (2i, 2i+1) are SMT siblings — TODO confirm against
// /sys/devices/system/cpu/cpuN/topology/thread_siblings_list on the target.
// Uses the online CPU count instead of the previous hard-coded 80
// (falling back to 80 if the query fails); an odd trailing CPU is dropped.
std::vector<std::pair<int, int>> getPhysicalCoreMapping() {
    long n = sysconf(_SC_NPROCESSORS_ONLN);
    int cpuCount = (n > 0) ? static_cast<int>(n) : 80;
    cpuCount &= ~1; // sibling pairs require an even count

    std::vector<std::pair<int, int>> physicalCores;
    physicalCores.reserve(cpuCount / 2);
    for (int i = 0; i < cpuCount; i += 2) {
        physicalCores.emplace_back(i, i + 1);
    }
    return physicalCores;
}

// Bind thread to specified CPU core
// Pin the given thread (tid; 0 means the calling thread) to a single CPU
// core via sched_setaffinity. Returns true on success; on failure logs
// the errno description to stderr and returns false.
bool bindThreadToCore(int tid, int coreId) {
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(coreId, &mask);

    const int rc = sched_setaffinity(tid, sizeof(mask), &mask);
    if (rc != 0) {
        std::cerr << "Failed to bind thread " << tid << " to core " << coreId
                  << ": " << strerror(errno) << std::endl;
    }
    return rc == 0;
}

// Collect comprehensive PMU data for threads
// Collect comprehensive PMU data for threads.
// Counts ten core/cache/branch PMU events for every thread of the given
// PIDs over `duration` seconds (global, set from argv), then derives each
// thread's affinity metrics for hyperthread pairing.
// Returns an empty vector when no PID has a readable CPU affinity or the
// PMU session cannot be opened/read.
std::vector<ThreadAffinityMetrics> collectThreadAffinityData(const std::vector<int>& processList) {
    std::vector<ThreadAffinityMetrics> threads;

    if (processList.empty()) {
        return threads;
    }

    // Get CPU affinity for all processes
    std::vector<int> validPids;
    std::vector<int> allBoundCpus;
    std::unordered_set<int> uniqueCpus;

    for (const auto& pid : processList) {
        // getCpuCore() fills the global pidBoundCpus as a side effect.
        if (getCpuCore(pid) != -1) {
            validPids.push_back(pid);
            // Merge this PID's allowed CPUs into allBoundCpus, de-duplicated.
            for (const auto& cpu : pidBoundCpus) {
                if (uniqueCpus.find(cpu) == uniqueCpus.end()) {
                    allBoundCpus.push_back(cpu);
                    uniqueCpus.insert(cpu);
                }
            }
        }
    }

    if (validPids.empty()) {
        return threads;
    }

    // Extended PMU events for comprehensive analysis (ARMv8 specific)
    char* evtList[10];
    evtList[0] = (char*)"cpu-cycles";           // CPU cycles
    evtList[1] = (char*)"instructions";         // Instructions retired
    evtList[2] = (char*)"r33";                  // LLC misses (keep as is)
    evtList[3] = (char*)"r32";                  // LLC references (keep as is)
    evtList[4] = (char*)"l1d_cache_refill";     // L1 data cache refills (ARMv8)
    evtList[5] = (char*)"l1d_cache";            // L1 data cache accesses (ARMv8)
    evtList[6] = (char*)"l2d_cache_refill";     // L2 data cache refills (ARMv8)
    evtList[7] = (char*)"l2d_cache";            // L2 data cache accesses (ARMv8)
    evtList[8] = (char*)"br_mis_pred";          // Branch mispredictions (ARMv8)
    evtList[9] = (char*)"br_pred";              // Branch predictions (ARMv8)

    // Counting-mode session restricted to the valid PIDs and the union of
    // the CPUs they are allowed to run on.
    PmuAttr attr = {0};
    attr.evtList = evtList;
    attr.numEvt = 10;
    attr.pidList = validPids.data();
    attr.numPid = validPids.size();
    attr.cpuList = allBoundCpus.data();
    attr.numCpu = allBoundCpus.size();

    int pd = PmuOpen(COUNTING, &attr);
    if (pd == -1) {
        std::cerr << "PmuOpen failed for multiple PIDs" << std::endl;
        std::cerr << "Error msg: " << Perror() << std::endl;
        return threads;
    }

    // Count for the configured duration, then read the accumulated values.
    PmuEnable(pd);
    sleep(duration);

    PmuData* pmuData = nullptr;
    int len = PmuRead(pd, &pmuData);
    if (len == -1) {
        std::cerr << "PmuRead failed" << std::endl;
        std::cerr << "Error msg: " << Perror() << std::endl;
        PmuDisable(pd);
        PmuClose(pd);
        return threads;
    }

    // Create mapping to store PMU data for each thread
    std::unordered_map<unsigned, ThreadAffinityMetrics> tidData;
    std::unordered_map<unsigned, int> tidToPid;
    std::unordered_map<unsigned, std::string> tidToComm;

    // Process collected data: one PmuData record per (thread, event) pair;
    // accumulate each event's count into that thread's metrics.
    for (int i = 0; i < len; ++i) {
        PmuData& data = pmuData[i];
        unsigned tid = data.tid;
        int pid = data.pid;

        tidToPid[tid] = pid;
        if (data.comm != nullptr) {
            tidToComm[tid] = std::string(data.comm);
        }

        // Initialize thread metrics if not exists.
        // Note: tidToComm[tid] default-inserts "" if no comm was seen yet,
        // and the comm stored here is never updated by later records.
        if (tidData.find(tid) == tidData.end()) {
            tidData.emplace(tid, ThreadAffinityMetrics(pid, tid, tidToComm[tid]));
        }

        // Accumulate PMU data
        if (strcmp(data.evt, "cpu-cycles") == 0) {
            tidData[tid].cpu_cycles += data.count;
        } else if (strcmp(data.evt, "instructions") == 0) {
            tidData[tid].instructions += data.count;
        } else if (strcmp(data.evt, "r33") == 0) {
            tidData[tid].llc_misses += data.count;
        } else if (strcmp(data.evt, "r32") == 0) {
            tidData[tid].llc_references += data.count;
        } else if (strcmp(data.evt, "l1d_cache_refill") == 0) {
            tidData[tid].l1_misses += data.count;
        } else if (strcmp(data.evt, "l1d_cache") == 0) {
            tidData[tid].l1_references += data.count;
        } else if (strcmp(data.evt, "l2d_cache_refill") == 0) {
            tidData[tid].l2_misses += data.count;
        } else if (strcmp(data.evt, "l2d_cache") == 0) {
            tidData[tid].l2_references += data.count;
        } else if (strcmp(data.evt, "br_mis_pred") == 0) {
            tidData[tid].branch_misses += data.count;
        } else if (strcmp(data.evt, "br_pred") == 0) {
            tidData[tid].branches += data.count;
        }
    }

    PmuDataFree(pmuData);
    PmuDisable(pd);
    PmuClose(pd);

    // Calculate derived metrics and convert to vector
    for (auto it = tidData.begin(); it != tidData.end(); ++it) {
        unsigned tid = it->first;
        ThreadAffinityMetrics& metrics = it->second;
        metrics.calculateMetrics();
        threads.push_back(metrics);
    }

    return threads;
}

// Print thread affinity metrics
// Print thread affinity metrics.
// Renders one table row per thread: classification plus the normalized
// scores and the raw-derived CPI / LLC-miss% / branch-miss% figures.
// NOTE: std::left set while printing the header stays in effect for the
// data rows (iostream manipulators are sticky) — keep the ordering intact.
void PrintThreadAffinityMetrics(const std::vector<ThreadAffinityMetrics>& threads) {
    std::cout << std::string(160, '=') << std::endl;
    std::cout << "Thread Hyperthread Affinity Analysis" << std::endl;
    std::cout << std::string(160, '-') << std::endl;
    std::cout << std::setw(6) << "Index" << std::setw(8) << std::left << "PID"
              << std::setw(8) << "TID" << std::setw(16) << "Process"
              << std::setw(12) << "Type"
              << std::setw(10) << "CPU-Int" << std::setw(10) << "Cache-P"
              << std::setw(10) << "Cache-F" << std::setw(10) << "Temporal"
              << std::setw(12) << "CPI" << std::setw(12) << "LLC-Miss%"
              << std::setw(12) << "Branch-Miss%" << std::endl;
    std::cout << std::string(160, '-') << std::endl;

    for (size_t i = 0; i < threads.size(); ++i) {
        const auto& thread = threads[i];
        // Truncate long names so they fit the 16-character Process column.
        std::string process_name = thread.comm.empty() ? "unknown" : thread.comm;
        if (process_name.length() > 14) {
            process_name = process_name.substr(0, 11) + "...";
        }

        // Recompute raw ratios from counts; 0.0 when the denominator is 0.
        double cpi = (thread.instructions > 0) ?
            static_cast<double>(thread.cpu_cycles) / thread.instructions : 0.0;
        double llc_miss_rate = (thread.llc_references > 0) ?
            static_cast<double>(thread.llc_misses) / thread.llc_references * 100.0 : 0.0;
        double branch_miss_rate = (thread.branches > 0) ?
            static_cast<double>(thread.branch_misses) / thread.branches * 100.0 : 0.0;

        std::cout << std::setw(6) << i << std::setw(8) << std::left << thread.pid
                  << std::setw(8) << thread.tid << std::setw(16) << process_name
                  << std::setw(12) << thread.getThreadType()
                  << std::setw(10) << std::fixed << std::setprecision(3) << thread.cpu_intensity
                  << std::setw(10) << std::fixed << std::setprecision(3) << thread.cache_pressure
                  << std::setw(10) << std::fixed << std::setprecision(3) << thread.cache_friendliness
                  << std::setw(10) << std::fixed << std::setprecision(3) << thread.temporal_pattern
                  << std::setw(12) << std::fixed << std::setprecision(2) << cpi
                  << std::setw(12) << std::fixed << std::setprecision(2) << llc_miss_rate << "%"
                  << std::setw(12) << std::fixed << std::setprecision(2) << branch_miss_rate << "%" << std::endl;
    }

    std::cout << std::string(160, '_') << std::endl;
}

// Find optimal thread pairs for hyperthreading
// Find optimal thread pairs for hyperthreading.
// Scores every unordered pair of threads, then greedily takes the
// best-scoring pair whose members are both still unassigned. Returns
// pairs of indices into `threads`.
std::vector<std::pair<int, int>> findOptimalThreadPairs(const std::vector<ThreadAffinityMetrics>& threads) {
    const size_t count = threads.size();
    std::vector<std::pair<int, int>> result;
    std::vector<bool> taken(count, false);

    // Score every unordered pair (a, b), a < b.
    std::vector<std::tuple<double, int, int>> scored;
    for (size_t a = 0; a < count; ++a) {
        for (size_t b = a + 1; b < count; ++b) {
            scored.emplace_back(threads[a].calculateAffinityWith(threads[b]),
                                static_cast<int>(a), static_cast<int>(b));
        }
    }

    // Order by affinity score, highest first (tuple comparison breaks ties
    // by index, and every (score, a, b) triple is distinct).
    std::sort(scored.begin(), scored.end(),
              [](const std::tuple<double, int, int>& lhs,
                 const std::tuple<double, int, int>& rhs) { return lhs > rhs; });

    // Greedy matching: claim both threads of each pair the first time
    // they appear together while still free.
    for (const auto& entry : scored) {
        const int a = std::get<1>(entry);
        const int b = std::get<2>(entry);
        if (!taken[a] && !taken[b]) {
            result.emplace_back(a, b);
            taken[a] = taken[b] = true;
        }
    }

    return result;
}

// Bind threads optimally to hyperthreaded cores
// Bind threads optimally to hyperthreaded cores.
// Pairs threads by affinity score and pins each pair onto one physical
// core (two logical siblings). Assumes logical CPUs 2i/2i+1 are SMT
// siblings (see getPhysicalCoreMapping) — TODO confirm against
// /sys/devices/system/cpu/cpuN/topology on the target machine.
// Pairs beyond the number of physical cores are silently left unbound.
void bindThreadsWithHyperthreadAffinity(std::vector<ThreadAffinityMetrics>& threads) {
    if (threads.empty()) {
        std::cout << "No valid thread data found" << std::endl;
        return;
    }

    // Find optimal thread pairs (indices into `threads`).
    auto pairs = findOptimalThreadPairs(threads);

    if (pairs.empty()) {
        std::cout << "No thread pairs found for binding" << std::endl;
        return;
    }

    std::cout << std::string(100, '=') << std::endl;
    std::cout << "Hyperthread Affinity-Based Thread Binding" << std::endl;
    std::cout << std::string(100, '-') << std::endl;

    // Bind pairs to physical cores, one pair per core, in pair order.
    auto physicalCores = getPhysicalCoreMapping();

    for (size_t i = 0; i < pairs.size() && i < physicalCores.size(); ++i) {
        const auto& pair = pairs[i];
        const auto& physicalCore = physicalCores[i];

        int thread1_idx = pair.first;
        int thread2_idx = pair.second;

        const auto& thread1 = threads[thread1_idx];
        const auto& thread2 = threads[thread2_idx];

        int logical_core1 = physicalCore.first;   // Hyperthread 0
        int logical_core2 = physicalCore.second;  // Hyperthread 1

        // Bind threads to logical cores (failures are logged by
        // bindThreadToCore and do not stop the remaining bindings).
        bindThreadToCore(thread1.tid, logical_core1);
        bindThreadToCore(thread2.tid, logical_core2);

        // Calculate affinity score (for reporting only).
        double affinity_score = thread1.calculateAffinityWith(thread2);

        std::string process1_name = thread1.comm.empty() ? "unknown" : thread1.comm;
        std::string process2_name = thread2.comm.empty() ? "unknown" : thread2.comm;

        // Truncate names so they fit the 12-character report column.
        if (process1_name.length() > 12) process1_name = process1_name.substr(0, 9) + "...";
        if (process2_name.length() > 12) process2_name = process2_name.substr(0, 9) + "...";

        std::cout << "Physical Core " << (i) << " (Logical " << logical_core1 << "," << logical_core2 << "):" << std::endl;
        std::cout << "  Thread " << std::setw(6) << thread1.tid
                  << " (" << std::setw(12) << std::left << process1_name << ") "
                  << "[" << thread1.getThreadType() << "] -> Core " << logical_core1 << std::endl;
        std::cout << "  Thread " << std::setw(6) << thread2.tid
                  << " (" << std::setw(12) << std::left << process2_name << ") "
                  << "[" << thread2.getThreadType() << "] -> Core " << logical_core2 << std::endl;
        std::cout << "  Affinity Score: " << std::fixed << std::setprecision(3) << affinity_score << std::endl;
        std::cout << std::endl;
    }

    std::cout << std::string(100, '_') << std::endl;
    std::cout << "Bound " << pairs.size() << " thread pairs to " << pairs.size() << " physical cores" << std::endl;
}

// Legacy functions (keeping for compatibility)
void totalDDRCBandwidth() {
    PmuDeviceAttr devAttr[2];
    devAttr[0].metric = PMU_DDR_READ_BW;
    devAttr[1].metric = PMU_DDR_WRITE_BW;
    int pd = PmuDeviceOpen(devAttr, 2);
    PmuEnable(pd);
    sleep(1);
    PmuData *oriData = nullptr;
    int oriLen = PmuRead(pd, &oriData);
    PmuDeviceData *devData = nullptr;
    auto len = PmuGetDevMetric(oriData, oriLen, devAttr, 2, &devData);
    std::unordered_map<int, double> stats;
    for (int i = 0; i < len; ++i) {
        stats[devData[i].ddrNumaId] += devData[i].count / 1024 / 1024;
    }
    for (const auto &entry : stats) {
        int id = entry.first;
        double sum = entry.second;
        numaTotalDDRC[id] = sum;
    }
    numaNum = numaTotalDDRC.size();
    DevDataFree(devData);
    PmuDataFree(oriData);
    PmuDisable(pd);
}

// Populate the NUMA-node -> core-list maps (numaToCpuCore/numaToCpuNumber).
// Requires numaNum to be set first (see totalDDRCBandwidth). The core
// arrays returned by PmuGetNumaCore are cached for the process lifetime.
void initNumaToCoreList() {
    for (unsigned i = 0; i < numaNum; ++i) {
        unsigned *coreList = nullptr;
        int len = PmuGetNumaCore(i, &coreList);
        if (len <= 0 || coreList == nullptr) {
            // Query failed: record an empty list instead of letting a
            // negative length wrap into a huge unsigned count.
            numaToCpuCore[i] = nullptr;
            numaToCpuNumber[i] = 0;
            continue;
        }
        numaToCpuCore[i] = coreList;
        numaToCpuNumber[i] = len;
    }
}

// Parse a Linux cpu-list string (e.g. "0-3,8,10-11") into a sorted,
// de-duplicated vector of CPU ids.
// Empty segments (stray commas, e.g. "1,,2") are skipped instead of
// crashing stoi; an inverted range like "5-2" is reported and ignored.
// Throws std::invalid_argument / std::out_of_range (from std::stoi) on
// malformed numbers — behavior getCpuCore relies on.
std::vector<int> parseCpuRange(const std::string &rangeStr) {
    std::vector<int> cpus;
    std::stringstream ss(rangeStr);
    std::string part;

    while (getline(ss, part, ',')) {
        if (part.empty()) {
            continue; // tolerate stray commas
        }
        size_t hyphen_pos = part.find("-");
        if (hyphen_pos != std::string::npos) {
            int start = std::stoi(part.substr(0, hyphen_pos));
            int end = std::stoi(part.substr(hyphen_pos + 1));
            if (start > end) {
                std::cerr << "Invalid CPU range: " << part << std::endl;
                continue; // skip the inverted range explicitly
            }
            for (int i = start; i <= end; ++i) {
                cpus.push_back(i);
            }
        } else {
            cpus.push_back(std::stoi(part));
        }
    }

    std::sort(cpus.begin(), cpus.end());
    cpus.erase(unique(cpus.begin(), cpus.end()), cpus.end());
    return cpus;
}

// Read the "Cpus_allowed_list" value (e.g. "0-7,16") for a process from
// /proc/<pid>/status. Returns "" when the file or the key is not found.
std::string getCpuAffinityList(int pid) {
    std::string path = "/proc/" + std::to_string(pid) + "/status";
    std::ifstream in(path);
    if (!in.is_open()) {
        std::cerr << "Not found: " << path << std::endl;
        return "";
    }
    std::string line;
    const std::string targetKey = "Cpus_allowed_list:";
    while (getline(in, line)) {
        if (line.compare(0, targetKey.length(), targetKey) == 0) {
            // Take everything after the key and strip the separating
            // whitespace. The previous substr(pos + 1) unconditionally
            // skipped one character, corrupting the value whenever the
            // separator was not a single tab/space.
            std::string value = line.substr(targetKey.length());
            size_t begin = value.find_first_not_of(" \t");
            return (begin == std::string::npos) ? "" : value.substr(begin);
        }
    }
    return "";
}

// Resolve the CPU affinity list of `pid` into the global pidBoundCpus.
// Returns 0 on success and -1 on any failure (missing process or parse
// error), so callers can uniformly test `getCpuCore(pid) != -1`.
int getCpuCore(int pid) {
    try {
        std::string rangeStr = getCpuAffinityList(pid);
        if (rangeStr == "") {
            return -1;
        }
        pidBoundCpus = parseCpuRange(rangeStr);
    } catch (const std::exception &e) {
        std::cerr << "Error: " << e.what() << std::endl;
        // Previously returned 1 here; callers check `!= -1`, so a parse
        // failure was silently treated as success. Report -1 consistently.
        return -1;
    }
    return 0;
}

// Return true if any CPU id appears in both the raw array and the vector.
// Hashes the smaller side and probes with the larger, so the work is
// O(small + large). Null/empty inputs never intersect.
bool hasCommonCpu(const unsigned *cpuArray, size_t arraySize, const std::vector<int> &cpuVector) {
    if (cpuArray == nullptr || arraySize == 0 || cpuVector.empty()) {
        return false;
    }

    if (arraySize < cpuVector.size()) {
        // Hash the array, probe with the vector.
        std::unordered_set<unsigned> lookup(cpuArray, cpuArray + arraySize);
        return std::any_of(cpuVector.begin(), cpuVector.end(),
                           [&lookup](int cpu) { return lookup.count(cpu) > 0; });
    }

    // Hash the vector, probe with the array.
    std::unordered_set<unsigned> lookup(cpuVector.begin(), cpuVector.end());
    for (size_t idx = 0; idx < arraySize; ++idx) {
        if (lookup.find(cpuArray[idx]) != lookup.end()) {
            return true;
        }
    }
    return false;
}

// Format the L3 miss ratio (llc_miss / llc_cache) as a percentage string
// with FLOAT_PRECISION decimal places; yields "0.00" when llc_cache is 0.
std::string GetL3CMissPercent(unsigned llc_miss, unsigned llc_cache) {
    double percent = 0.0;
    if (llc_cache != 0) {
        percent = static_cast<double>(llc_miss) / llc_cache * 100.0;
    }
    std::ostringstream out;
    out << std::fixed << std::setprecision(FLOAT_PRECISION) << percent;
    return out.str();
}

// Scan /proc for processes whose command line contains `keyword`.
// Prints a summary plus each matching PID, and returns the PID list.
std::vector<int> findProcessesByKeyword(const std::string& keyword) {
    std::vector<int> matched;
    DIR *procDir = opendir("/proc");
    if (procDir == nullptr) {
        std::cerr << "Failed to open /proc directory" << std::endl;
        return matched;
    }

    for (struct dirent *entry = readdir(procDir); entry != nullptr; entry = readdir(procDir)) {
        if (entry->d_type != DT_DIR) {
            continue;
        }
        char *endptr = nullptr;
        long pid = strtol(entry->d_name, &endptr, 10);
        if (*endptr != '\0') {
            continue; // directory name is not purely numeric -> not a PID
        }
        std::ifstream cmdlineFile("/proc/" + std::string(entry->d_name) + "/cmdline");
        if (!cmdlineFile.is_open()) {
            continue;
        }
        // cmdline arguments are NUL-separated; getline stops only at '\n',
        // so the whole command line (including embedded NULs) is searched.
        std::string cmdline;
        std::getline(cmdlineFile, cmdline);
        if (cmdline.find(keyword) != std::string::npos) {
            matched.push_back(pid);
        }
    }
    closedir(procDir);

    std::cout << "Found " << matched.size() << " processes containing keyword '" << keyword << "'" << std::endl;
    for (const auto& pid : matched) {
        std::cout << "PID: " << pid << std::endl;
    }

    return matched;
}

// Print command-line usage, an invocation example, and the feature
// summary to stderr.
void print_usage() {
    static const char* const usageLines[] = {
        "Usage: hyperthread_affinity_bind <threshold> <topNum> <duration> <period> <process_keyword>\n",
        "--threshold : the collect threshold of total ddrc bandwidth, unit M/s\n",
        "--topNum : the top N thread of affinity analysis\n",
        "--duration : the total collect time of PMU data, unit s\n",
        "--period : the period of PMU data collect, unit ms\n",
        "--process_keyword : the keyword to match process names\n",
        " example: hyperthread_affinity_bind 100 10 10 1000 benchmark\n",
        "\n",
        "Features:\n",
        "- Comprehensive PMU-based thread affinity analysis\n",
        "- CPU-intensive, memory-intensive, cache-friendly, temporal pattern assessment\n",
        "- Optimal hyperthread pairing based on resource complementarity\n",
        "- Automatic thread binding to maximize hyperthread efficiency\n",
    };
    for (const char* const line : usageLines) {
        std::cerr << line;
    }
}

int main(int argc, char** argv) {
    if (argc < 6) {
        print_usage();
        return 0;
    }

    double threshold = 0.0;
    std::string processKeyword;
    bool collectAffinityFlag = false;

    try {
        threshold = std::stod(argv[1]);
        if (threshold <= 0) {
            throw std::invalid_argument("threshold must be a positive number.");
        }

        topNum = std::stod(argv[2]);
        if (topNum <= 0) {
            throw std::invalid_argument("TopNum must be a positive number.");
        }

        duration = std::stod(argv[3]);
        if (duration <= 0) {
            throw std::invalid_argument("Duration must be a positive number.");
        }

        period = std::stoi(argv[4]);
        if (period <= 0) {
            throw std::invalid_argument("Period must be a positive integer.");
        }

        processKeyword = argv[5];
    } catch (const std::exception& e) {
        std::cerr << "Error parsing arguments: " << e.what() << "\n";
        print_usage();
        return EXIT_FAILURE;
    }

    // Find all processes containing the keyword
    std::vector<int> matchedPids = findProcessesByKeyword(processKeyword);
    if (matchedPids.empty()) {
        std::cerr << "No processes found containing keyword '" << processKeyword << "'" << std::endl;
        return EXIT_FAILURE;
    }

    totalDDRCBandwidth();
    initNumaToCoreList();

    // Check if NUMA bandwidth exceeds threshold
    for (const auto &data : numaTotalDDRC) {
        std::cout << "Numa ID: " << data.first << ", total bandwidth: " << data.second << "M/s";
        if (data.second > threshold) {
            std::cout << " --> Exceeds threshold, need to collect affinity data";
            collectAffinityFlag = true;
        } else {
            std::cout << " --> Does not exceed threshold";
        }
        std::cout << std::endl;
    }

    if (collectAffinityFlag) {
        // Collect comprehensive PMU data for thread affinity analysis
        std::vector<ThreadAffinityMetrics> threads = collectThreadAffinityData(matchedPids);

        // Print thread affinity metrics
        PrintThreadAffinityMetrics(threads);

        // Bind threads optimally using hyperthread affinity analysis
        bindThreadsWithHyperthreadAffinity(threads);
    }

    return 0;
}