// *******************************************************************
// >> File Name: /home/diabio/project/al_cache/al_cache/cache_algorithm_tester.h
// >> Author: diabiolin
// >> Created Time: Sun 13 Apr 2025 01:07:56 PM CST
// >> Description: Cache algorithm testing tool
// *******************************************************************

#pragma once

// C / POSIX headers
#include <limits.h>
#include <sys/resource.h>

// C++ standard library
#include <algorithm>
#include <atomic>
#include <chrono>
#include <csignal>
#include <functional>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>

// 定义操作类型枚举
enum class OperationType{
    PUT,
    GET              
};

// 定义缓存操作序列结构体 
template <typename Key, typename Value>
struct CacheOperation{
    OperationType type;
    Key key;
    Value value;
};

// 数据序列生成基类
template <typename Key, typename Value>
class DataGeneratorBase{
public:
    virtual std::vector<CacheOperation<Key, Value>> generate(int numOperations) = 0;
    virtual ~DataGeneratorBase() = default;
};

// 数据序列生成器 
// 热点数据生成器
template <typename Key, typename Value>
class HotDataGenerator : public DataGeneratorBase<Key, Value>{
public:
    HotDataGenerator(int hot_keys = 10, int  cold_keys = 90, double hot_ratio = 0.7)
        : hot_keys_(hot_keys),
          cold_keys_(cold_keys),
          hot_ratio_(hot_ratio) {}

    std::vector<CacheOperation<Key, Value>> generate(int numOperations) override {
        std::vector<CacheOperation<Key, Value>> sequence;
        std::random_device rd;
        std::mt19937 gen(rd());

        for(int op = 0; op < numOperations; op++) {
            Key key;
            if((double)gen() / gen.max() < hot_ratio_) { // 70%热点数据 
                key = static_cast<Key>(gen() % hot_keys_);
            } else { // 30%冷数据
                key = static_cast<Key>(hot_keys_ + (gen() % cold_keys_));
            }

            // 随机选择 PUT 或 GET 操作
            OperationType opType = (gen() % 2 == 0) ? OperationType::PUT : OperationType::GET;
            sequence.push_back({opType, key, static_cast<Value>(gen())});
        }
        return sequence;
    }

private:
    int hot_keys_;
    int cold_keys_;
    double hot_ratio_;
};

// 循环扫描数据生成器
template <typename Key, typename Value>
class LoopScanDataGenerator : public DataGeneratorBase<Key, Value>{
public:
    LoopScanDataGenerator(int loop_size = 500, double sequential_ratio = 0.6, double random_ratio = 0.3, double put_ratio = 0.3)
        : loop_size_(loop_size),
          sequential_ratio_(sequential_ratio),
          random_ratio_(random_ratio),
          put_ratio_(put_ratio) {}

    std::vector<CacheOperation<Key, Value>> generate(int numOperations) override{
        std::vector<CacheOperation<Key, Value>> sequence;
        std::random_device rd;
        std::mt19937 gen(rd());

        // 先填充数据
        for(Key key = 0; key < static_cast<Key>(loop_size_); key++){
            sequence.push_back({OperationType::PUT, key, static_cast<Value>(gen())});
        }

        // 然后进行访问测试
        Key current_pos = 0;
        for(int op = 0; op < numOperations; op++){
            Key key;
            double r = (double)gen() / gen.max();
            if(r < sequential_ratio_){ // 60%顺序扫描
                key = current_pos;
                current_pos = static_cast<Key>((current_pos + 1) % loop_size_);
            } else if(r < sequential_ratio_ + random_ratio_){ // 30%随机跳跃
                key = static_cast<Key>(gen() % loop_size_);
            } else { // 10%访问范围外的数据
                key = static_cast<Key>(loop_size_ + (gen() % loop_size_));
            }

            sequence.push_back({OperationType::GET, key, static_cast<Value>(0)});

            // 随机进行put操作，更新缓存内容
            if((double)gen() / gen.max() < put_ratio_){ // 30%概率进行put
                sequence.push_back({OperationType::PUT, key, static_cast<Value>(gen())});
            }
        }
        return sequence;
    }

private:
    int loop_size_;
    double sequential_ratio_;
    double random_ratio_;
    double put_ratio_;
};

// 工作负载剧烈变化数据生成器
template <typename Key, typename Value>
class WorkloadChangeDataGenerator : public DataGeneratorBase<Key, Value>{
public:
    WorkloadChangeDataGenerator(int phase_length_ratio = 5, double put_ratio = 0.3)
        : phase_length_ratio_(phase_length_ratio),
          put_ratio_(put_ratio) {}

    std::vector<CacheOperation<Key, Value>> generate(int numOperations) override {
        std::vector<CacheOperation<Key, Value>> sequence;
        std::random_device rd;
        std::mt19937 gen(rd());
        int phase_length = numOperations / phase_length_ratio_;

        // 先填充一些初始数据
        for(Key key = 0; key < static_cast<Key>(1000); key++){
            sequence.push_back({OperationType::PUT, key, static_cast<Value>(gen())});
        }

        // 然后进行多阶段测试
        for(int op = 0; op < numOperations; op++){
            Key key;
            // 根据不同阶段选择不同的访问模式
            if(op < phase_length){ // 热点访问
                key = static_cast<Key>(gen() % 5);
            } else if(op < phase_length * 2){ // 大范围随机 
                key = static_cast<Key>(gen() % 1000);
            } else if(op < phase_length * 3){ // 顺序扫描 
                key = static_cast<Key>((op - phase_length * 2) % 100);
            } else if(op < phase_length * 4){ // 局部性随机
                int locality = (op / 1000) % 10;
                key = static_cast<Key>(locality * 20 + (gen() % 20));
            } else { // 混合访问
                int r = gen() % 100;
                if(r < 30){
                    key = static_cast<Key>(gen() % 5);
                } else if(r < 60){
                    key = static_cast<Key>(5 + (gen() % 95));
                } else {
                    key = static_cast<Key>(100 + (gen() % 900));
                }
            }

            sequence.push_back({OperationType::GET, key, static_cast<Value>(0)});

            // 随机进行put操作，更新缓存内容 
            if((double)gen() / gen.max() < put_ratio_){
                sequence.push_back({OperationType::PUT, key, static_cast<Value>(gen())});
            }
        }

        return sequence;
    }

private:
    int phase_length_ratio_;
    double put_ratio_;
};

namespace al_cache{

// 模拟从数据源获取数据 
template <typename Key, typename Value>
Value fetchFromDataSource(const Key& key, bool isUsing){
    // 模拟耗时操作
    if(isUsing){
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    return static_cast<Value>(key);
}

// 缓存算法测试类模板
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
class CacheAlgorithmTester{
public:
    CacheAlgorithmTester(int capacity, int numOperations, std::vector<int> threadCounts, 
                         std::vector<std::string> dataTypes, bool usingSimulationDataSources, 
                         const std::vector<std::pair<std::reference_wrapper<CacheType<Key, Value>>, std::string>>& caches);
    // 运行所有测试场景
    void runAllTests();

private:
    // 运行指定线程数量和数据类型的测试
    void runTest(CacheType<Key, Value>& cache, const std::string& algorithmName, int threadCount, const std::string& dataType);
    // 根据数据类型生成相应的数据
    void generateData();
    // 获取当前进程的内存使用情况
    size_t getMemoryUsage();
    // 打印所有测试结果的表格
    void printResults();

private:
    // 测试结果结构体，用于存储每次测试的各项指标
    struct TestResult{
        std::string algorithmName;
        int threadCount;
        std::string dataType;
        double runTime;
        double throughput;
        double avgLatency;
        double minLatency;
        double maxLatency;
        double hitRate;
        size_t memoryUsage;
    };
    std::vector<TestResult> testResults_; // 存储所有测试结果 
    int capacity_; // 缓存容量
    std::vector<int> threadCounts_; // 要测试的线程数量
    std::vector<std::string> dataTypes_; // 要测试的数据类型（hot、loop_scan、workload_change）
    std::unordered_map<std::string, std::vector<CacheOperation<Key, Value>>> test_data_sequences_;
    bool usingSimulationDataSources_; // 是否使用模拟读取数据源操作
    int numOperations_; // 操作次数
    std::vector<std::pair<std::reference_wrapper<CacheType<Key, Value>>, std::string>> caches_;
};

// 构造函数实现
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
CacheAlgorithmTester<Key, Value, CacheType>::CacheAlgorithmTester(int capacity, int numOperations, 
                                                                 std::vector<int> threadCounts, std::vector<std::string> dataTypes, 
                                                                 bool usingSimulationDataSources, 
                                                                 const std::vector<std::pair<std::reference_wrapper<CacheType<Key, Value>>, std::string>>& caches)
    : capacity_(capacity),
      threadCounts_(threadCounts),
      dataTypes_(dataTypes),
      usingSimulationDataSources_(usingSimulationDataSources),
      numOperations_(numOperations),
      caches_(caches) {}

// 运行所有测试场景的函数实现
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
void CacheAlgorithmTester<Key, Value, CacheType>::runAllTests(){
    // 生成测试数据序列
    generateData();

    // 遍历不同的线程数量和数据类型组合进行测试
    for(const auto& cachePair : caches_){
        auto& cache = cachePair.first.get();
        const auto& algorithmName = cachePair.second;
        for(int threadCount : threadCounts_){
            for(const auto& dataType : dataTypes_){
                runTest(cache, algorithmName, threadCount, dataType);
            }
        }
    }
    // 打印所有测试结果
    printResults();
}

// 运行指定线程数量和数据类型的测试的函数实现
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
void CacheAlgorithmTester<Key, Value, CacheType>::runTest(CacheType<Key, Value>& cache, 
                                                          const std::string& algorithmName, int threadCount, const std::string& dataType){
    std::vector<std::thread> threads;  // 存储线程对象
    std::atomic<int> hitCount(0);  // 原子变量，用于记录命中率
    std::atomic<int> missCount(0);  // 原子变量， 用于记录未命中次数
    std::atomic<long long> totalLatency(0);  // 原子变量， 用于记录总延迟
    std::atomic<long long> minLatency(LLONG_MAX); // 原子变量，用于记录最小延迟
    std::atomic<long long> maxLatency(LLONG_MIN); // 原子变量，用于记录最大延迟

    // 线程执行函数，进行缓存的读写操作
    auto threadFunction = [&](const std::vector<CacheOperation<Key, Value>>& data){
        int localHitCount = 0;
        int localMissCount = 0;
        long long localTotalLatency = 0;
        long long localMinLatency = LLONG_MAX;
        long long localMaxLatency = LLONG_MIN;

        for(const auto& op : data){
            auto start = std::chrono::high_resolution_clock::now();  // 记录操作开始时间
            Value value;
            if(op.type == OperationType::GET){
                if(cache.get(op.key, value)){ // 尝试从缓存中获取数据
                    localHitCount++;  // 命中，局部未命中次数加1 
                } else {
                    localMissCount++;  // 未命中，局部未命中次数加1 
                    // 模拟从数据源获取数据 
                    value = fetchFromDataSource<Key, Value>(op.key, usingSimulationDataSources_); 
                    cache.put(op.key, value);
                }
            } else if(op.type == OperationType::PUT){
                cache.put(op.key, op.value);
            }
            auto end = std::chrono::high_resolution_clock::now(); // 记录操作结束时间
            long long latency = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
            localTotalLatency += latency;
            localMinLatency = std::min(localMinLatency, latency);
            localMaxLatency = std::max(localMaxLatency, latency);
        }
        // 线程结束时，将局部计数器的值累加到全局计数器上
        hitCount += localHitCount;
        missCount += localMissCount;
        totalLatency += localTotalLatency;
        minLatency = std::min(minLatency.load(), localMinLatency);
        maxLatency = std::max(maxLatency.load(), localMaxLatency);
    };

    std::vector<std::vector<CacheOperation<Key, Value>>> threadData(threadCount);
    const auto& data = test_data_sequences_[dataType];
    size_t chunkSize = data.size() / threadCount;
    for(size_t i = 0; i < threadCount; ++i) {
        size_t start = i * chunkSize;
        size_t end = (i == threadCount - 1) ? data.size() : (i + 1) * chunkSize;
        threadData[i].assign(data.begin() + start, data.begin() + end);
    }

    auto start = std::chrono::high_resolution_clock::now();
    // 创建并启动线程
    for(int i = 0; i < threadCount; ++i){
        threads.emplace_back(threadFunction, threadData[i]);
    }

    // 等待所有线程执行完毕
    for(auto& t : threads){
        t.join();
    }
    auto end = std::chrono::high_resolution_clock::now();  // 记录测试结束时间

    // double runTime = std::chrono::duration_cast<std::chrono::seconds>(end - start).count(); // 秒级精度，小于1s会导致吞吐量计算异常
	auto duration = end - start;
	double runTime = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() / 1000.0;
    int totalAccesses = hitCount + missCount;  // 计算总访问次数
    double throughput = totalAccesses / runTime;  // 计算吞吐量
    double avgLatency = static_cast<double>(totalLatency) / totalAccesses;  // 计算平均延迟
    double hitRate = static_cast<double>(hitCount) / totalAccesses * 100;  // 计算命中率
    size_t memoryUsage = getMemoryUsage();  // 获取内存使用量 

    // 将本测试结果添加到结果列表中
    testResults_.push_back({algorithmName, threadCount, dataType, runTime, throughput, avgLatency,
                            static_cast<double>(minLatency), static_cast<double>(maxLatency), hitRate, memoryUsage});

}

// 生成不同类型的数据 
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
void CacheAlgorithmTester<Key, Value, CacheType>::generateData(){
    for(const std::string& dataType : dataTypes_){
        std::unique_ptr<DataGeneratorBase<Key, Value>> generator;
        if(dataType == "hot"){
            generator = std::make_unique<HotDataGenerator<Key, Value>>();
        } else if(dataType == "loop_scan"){
            generator = std::make_unique<LoopScanDataGenerator<Key, Value>>();
        } else if(dataType == "workload_change"){
            generator = std::make_unique<WorkloadChangeDataGenerator<Key, Value>>();
        }
        if(generator){
            test_data_sequences_[dataType] = generator->generate(numOperations_);
        }
    }
}

// 获取当前进程的内存使用情况的函数实现 
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
size_t CacheAlgorithmTester<Key, Value, CacheType>::getMemoryUsage(){
    // 定义一个rusage 结构体来存储资源使用信息
    struct rusage usage;
    // 获取当前进程的资源使用信息
    if(getrusage(RUSAGE_SELF, &usage) != 0){
        std::cerr << "Failed to get memory usage information.";
        return 0;
    }
    // 返回驻留集大小
    return usage.ru_maxrss;
}
// 打印所有测试结果的表格的函数实现 
template <typename Key, typename Value, template<typename K, typename V> class CacheType>
void CacheAlgorithmTester<Key, Value, CacheType>::printResults(){
    // 设置输出的小数精度为2位
    std::cout << std::fixed << std::setprecision(2);
    // 设置每列最大宽度
    int algorithmNameWidth = 10;
    int threadCountWidth = 8;
    int dataTypeWidth = 15;  
    int runTimeWidth = 10;
    int throughputWidth = 19;
    int avgLatencyWidth = 18;
    int minLatencyWidth = 18;
    int maxLatencyWidth = 18;
    int hitRateWidth = 11;
    int memoryUsageWidth = 16;

    // 打印表格头部的分隔线
    std::cout << "+" << std::string(algorithmNameWidth + 2, '-') << "+"
              << std::string(threadCountWidth + 2, '-') << "+"
              << std::string(dataTypeWidth  + 2, '-') << "+"
              << std::string(runTimeWidth + 2, '-') << "+"
              << std::string(throughputWidth + 2, '-') << "+"
              << std::string(avgLatencyWidth + 2, '-') << "+"
              << std::string(minLatencyWidth + 2, '-') << "+"
              << std::string(maxLatencyWidth + 2, '-') << "+"
              << std::string(hitRateWidth + 2, '-') << "+"
              << std::string(memoryUsageWidth + 2, '-') << "+\n"; 

    // 打印表格的列标题
    std::cout << "| " << std::left << std::setw(algorithmNameWidth) << "Algorithm"
              << " | " << std::left << std::setw(threadCountWidth) << "Threads"
              << " | " << std::left << std::setw(dataTypeWidth) << "Test Case"
              << " | " << std::left << std::setw(runTimeWidth) << "Runtime(s)"
              << " | " << std::left << std::setw(throughputWidth) << "Throughput(ops/sec)"
              << " | " << std::left << std::setw(avgLatencyWidth) << "Avg Latency(μs/op)"
              << " | " << std::left << std::setw(minLatencyWidth) << "Min Latency(μs/op)"
              << " | " << std::left << std::setw(maxLatencyWidth) << "Max Latency(μs/op)"
              << " | " << std::left << std::setw(hitRateWidth) << "Hit Rate(%)"
              << " | " << std::left << std::setw(memoryUsageWidth) << "Memory Usage(KB)"
              << " |\n";

    // 打印表格头部和数据部分的分隔线
    std::cout << "+" << std::string(algorithmNameWidth + 2, '-') << "+"
              << std::string(threadCountWidth + 2, '-') << "+"
              << std::string(dataTypeWidth  + 2, '-') << "+"
              << std::string(runTimeWidth + 2, '-') << "+"
              << std::string(throughputWidth + 2, '-') << "+"
              << std::string(avgLatencyWidth + 2, '-') << "+"
              << std::string(minLatencyWidth + 2, '-') << "+" 
              << std::string(maxLatencyWidth + 2, '-') << "+"
              << std::string(hitRateWidth + 2, '-') << "+"
              << std::string(memoryUsageWidth + 2, '-') << "+\n"; 

    // 遍历所有测试结果
    for(const auto& result : testResults_){
        // 打印每一行的测试结果
        std::cout << "| " << std::left << std::setw(algorithmNameWidth) << result.algorithmName 
                  << " | " << std::right << std::setw(threadCountWidth) << result.threadCount 
                  << " | " << std::right << std::setw(dataTypeWidth) << result.dataType 
                  << " | " << std::right << std::setw(runTimeWidth) << result.runTime 
                  << " | " << std::right << std::setw(throughputWidth) << result.throughput 
                  << " | " << std::right << std::setw(avgLatencyWidth) << result.avgLatency 
                  << " | " << std::right << std::setw(minLatencyWidth) << result.minLatency 
                  << " | " << std::right << std::setw(maxLatencyWidth) << result.maxLatency 
                  << " | " << std::right << std::setw(hitRateWidth) << result.hitRate 
                  << " | " << std::right << std::setw(memoryUsageWidth) << result.memoryUsage 
                  << " |\n";

        // 打印表格底部的分割线
        std::cout << "+" << std::string(algorithmNameWidth + 2, '-') << "+"
                  << std::string(threadCountWidth + 2, '-') << "+"
                  << std::string(dataTypeWidth  + 2, '-') << "+"
                  << std::string(runTimeWidth + 2, '-') << "+"
                  << std::string(throughputWidth + 2, '-') << "+"
                  << std::string(avgLatencyWidth + 2, '-') << "+"
                  << std::string(minLatencyWidth + 2, '-') << "+" 
                  << std::string(maxLatencyWidth + 2, '-') << "+"
                  << std::string(hitRateWidth + 2, '-') << "+"
                  << std::string(memoryUsageWidth + 2, '-') << "+\n"; 
    }
}

} // namespace al_cache
