#include "logger_factory.hpp"
#include "logger_builder.hpp"

#include <algorithm>  // std::sort / std::nth_element
#include <array>      // std::array (TestConfig tables)
#include <atomic>
#include <chrono>
#include <cmath>      // std::sqrt
#include <cstdlib>    // EXIT_SUCCESS / EXIT_FAILURE
#include <filesystem>
#include <iomanip>    // std::setw / std::setprecision
#include <iostream>
#include <memory>     // std::shared_ptr
#include <numeric>    // std::accumulate
#include <random>
#include <string>
#include <thread>
#include <vector>

// Simple compatibility shim for C++20 std::barrier on pre-C++20 toolchains
#if __cplusplus < 202002L
#include <condition_variable>
#include <mutex>
// Minimal reusable barrier (std::barrier substitute for pre-C++20 builds).
// `count` threads call arrive_and_wait(); the last arrival releases everyone
// and re-arms the barrier for the next round. The generation counter prevents
// a fast thread from racing through two rounds on one wakeup.
class SimpleBarrier {
public:
    // count: number of participating threads per rendezvous round.
    explicit SimpleBarrier(std::size_t count) : m_count(count), m_initialCount(count) {}

    // Blocks until `count` threads (including the caller) have arrived.
    void arrive_and_wait() {
        std::unique_lock<std::mutex> lock(m_mutex);
        auto gen = m_generation;
        if (--m_count == 0) {
            // Last arrival: open the barrier and reset it for the next round.
            m_generation++;
            m_count = m_initialCount;
            m_cv.notify_all();
        } else {
            // Wait for the generation to change — robust against spurious wakeups.
            m_cv.wait(lock, [this, gen] { return gen != m_generation; });
        }
    }
private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::size_t m_count;              // arrivals still outstanding this round
    const std::size_t m_initialCount; // participants per round
    std::size_t m_generation{0};      // round counter, bumped when barrier opens
    // NOTE: removed unused member m_currentGeneration — it was never read or written.
};
#else
#include <barrier>
using SimpleBarrier = std::barrier<>;
#endif


// Benchmark configuration shared by every test sweep.
struct TestConfig {
    // Total LOG_INFO calls per configuration (split evenly across threads).
    static constexpr size_t default_message_count = 1'000'000;
    // Number of timed repetitions per (threads, size) combination.
    static constexpr int repeat_count = 3;
    // Producer thread counts to sweep.
    static constexpr std::array<size_t, 5> thread_counts = {1, 2, 4, 8, 16};
    // Message payload sizes (bytes) to sweep.
    static constexpr std::array<size_t, 3> message_sizes = {64, 128, 256};
};


// Aggregated performance statistics for one (threads, size) configuration.
struct PerfStats {
    double avg_throughput = 0.0;     // mean messages/sec across repeats (total LOG_INFO calls)
    double std_dev_throughput = 0.0; // std deviation of the per-repeat throughput samples
    double avg_latency = 0.0;        // mean per-call LOG_INFO latency (microseconds)
    double std_dev_latency = 0.0;    // std deviation of the latency samples
    double p95_latency = 0.0;        // 95th-percentile latency (microseconds)
    double p99_latency = 0.0;        // 99th-percentile latency (microseconds)
};


// One row of benchmark output: a configuration plus its measured statistics.
// Member order matters — initialized via aggregate braces at the call sites.
struct TestResult {
    size_t thread_count;        // number of producer threads used
    size_t message_size;        // payload size in bytes
    PerfStats stats;            // measured throughput/latency statistics
    std::string formatter_type; // formatter under test ("Basic" or "JSON")
};


// === Helper Functions (kept consistent with the Blitz Logger benchmark) ===
// Arithmetic mean of `values`; returns 0.0 for an empty vector.
double calculate_mean(const std::vector<double>& values) {
    return values.empty()
               ? 0.0
               : std::accumulate(values.cbegin(), values.cend(), 0.0) /
                     static_cast<double>(values.size());
}


// Population standard deviation of `values` about the supplied `mean`;
// returns 0.0 for an empty vector. (Divides by N, not N-1, matching the
// original implementation.)
double calculate_std_dev(const std::vector<double>& values, double mean) {
    if (values.empty()) return 0.0;
    // Fold the squared deviations left-to-right — same summation order as a
    // hand-written loop, so the result is bit-identical.
    const double sum_squares = std::accumulate(
        values.begin(), values.end(), 0.0,
        [mean](double acc, double v) {
            const double diff = v - mean;
            return acc + diff * diff;
        });
    return std::sqrt(sum_squares / static_cast<double>(values.size()));
}


// Nearest-rank percentile of `values` (percentile in [0, 1]); 0.0 if empty.
// Takes the vector by value so the caller's data stays untouched.
double calculate_percentile(std::vector<double> values, double percentile) {
    if (values.empty()) return 0.0;
    size_t index = static_cast<size_t>(values.size() * percentile);
    if (index >= values.size()) index = values.size() - 1; // clamp percentile >= 1.0
    // We only need the element that a full sort would place at `index`;
    // std::nth_element finds exactly that in O(n) instead of O(n log n).
    std::nth_element(values.begin(),
                     values.begin() + static_cast<std::ptrdiff_t>(index),
                     values.end());
    return values[index];
}


// Builds a random alphanumeric payload of length `size - 1` (one byte is
// reserved so the payload plus a terminator fits in `size` bytes).
// Returns an empty string for size <= 1: the original `size - 1` arithmetic
// underflowed for size == 0 and attempted a SIZE_MAX-length allocation.
std::string generate_message(size_t size) {
    static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
    // thread_local engine: each producer thread draws independently, no locking.
    static thread_local std::mt19937 gen(std::random_device{}());
    // sizeof(charset) - 2 = last valid index (excludes the trailing '\0').
    static thread_local std::uniform_int_distribution<> dis(0, sizeof(charset) - 2);

    if (size <= 1) return {};

    std::string str(size - 1, '\0');
    for (char& c : str) {
        c = charset[dis(gen)];
    }
    return str;
}


// === Print Test Results ===
// Renders the collected benchmark results as fixed-width tables, grouped
// first by formatter type and then by producer-thread count.
void print_results(const std::vector<TestResult>& results) {
    std::cout << "\n============= MyLogger Performance Test Summary =============" << std::endl;

    for (const auto& formatter_type : {"Basic", "JSON"}) {
        std::cout << "\n--- Formatter: " << formatter_type << " ---\n";

        for (size_t thread_count : TestConfig::thread_counts) {
            // Table header for this thread count.
            std::cout << "\nThread Count: " << thread_count << std::endl
                      << std::setw(15) << "Message Size"
                      << std::setw(20) << "Throughput (msg/s)"
                      << std::setw(20) << "Latency (μs)"
                      << std::setw(15) << "P95 (μs)"
                      << std::setw(15) << "P99 (μs)" << std::endl
                      << std::string(85, '-') << std::endl;

            // Emit every row matching this (formatter, thread count) pair.
            for (const auto& row : results) {
                if (row.thread_count != thread_count || row.formatter_type != formatter_type) {
                    continue;
                }
                std::cout << std::fixed << std::setprecision(2)
                          << std::setw(15) << row.message_size
                          << std::setw(20) << row.stats.avg_throughput
                          << std::setw(20) << row.stats.avg_latency
                          << std::setw(15) << row.stats.p95_latency
                          << std::setw(15) << row.stats.p99_latency << std::endl;
            }
        }
    }
    std::cout << "\n=========================================================\n";
}


// --- Core benchmark routine (takes an ILogger instance) ---
// Runs TestConfig::repeat_count timed rounds. Each round spawns `thread_count`
// producer threads that each issue `message_count_per_thread` LOG_INSTANCE_INFO
// calls, timing every call. Latency samples are pooled across all rounds;
// throughput is one sample per round.
// NOTE(review): the timer stops when producers finish enqueueing, before
// logger->flush() — so throughput measures producer-side call cost, not the
// time for the sink to drain; presumably that is the intended metric — confirm.
PerfStats perform_test(std::shared_ptr<ILogger> logger,
                       size_t message_count_per_thread, 
                       size_t thread_count, 
                       size_t message_size) {


    std::string formatter_type = logger->getFormatterType(); 
    std::cout << "\n--- Testing MyLogger ---" << std::endl;
    std::cout << "Threads: " << thread_count << ", Messages/Thread: "
              << message_count_per_thread 
              << ", Msg Size: " << message_size << "B, Formatter: " << formatter_type << std::endl;


    std::vector<double> throughputs;   // one throughput sample per repeat
    std::vector<double> all_latencies; // per-call latencies pooled across repeats
    std::string test_message = generate_message(message_size);

    for (int repeat = 0; repeat < TestConfig::repeat_count; ++repeat) {

        std::vector<std::thread> threads;
        // One latency buffer per thread: no synchronization on the hot path.
        std::vector<std::vector<double>> thread_latencies(thread_count);
        
        // +1 participant so the main thread can release all producers at once.
        SimpleBarrier sync_point(thread_count + 1);

        for (size_t t = 0; t < thread_count; ++t) {
            // `logger` is captured by value: each thread holds its own shared_ptr.
            threads.emplace_back([&, t, logger]() {
                auto& latencies = thread_latencies[t];
                latencies.reserve(message_count_per_thread);
                
                sync_point.arrive_and_wait(); // Wait for all threads to be ready
                
                for (size_t i = 0; i < message_count_per_thread; ++i) {
                    auto start = std::chrono::steady_clock::now();
                    LOG_INSTANCE_INFO(logger, "Thread {} - {} - {}", t, test_message, i);
                    auto end = std::chrono::steady_clock::now();
                    
                    auto latency = std::chrono::duration<double, std::micro>(end - start).count();
                    latencies.push_back(latency);
                }
            });
        }


        // Start the clock only at the barrier release, so thread spawn cost
        // is excluded from the measured interval.
        auto start_time = std::chrono::steady_clock::now();
        sync_point.arrive_and_wait(); // Release producer threads

        // Wait for all producer threads to finish
        for (auto &thread : threads) {
            thread.join();
        }
        auto end_time = std::chrono::steady_clock::now();

        double duration = std::chrono::duration<double>(end_time - start_time).count();
        double throughput = static_cast<double>(message_count_per_thread * thread_count) / duration;
        throughputs.push_back(throughput);


        // Merge the per-thread samples into the global pool.
        for (const auto &latencies_vec : thread_latencies) {
            all_latencies.insert(all_latencies.end(), latencies_vec.begin(), latencies_vec.end());
        }


        std::cout << "  Waiting for async worker to flush... (repeat " << repeat + 1 << "/" << TestConfig::repeat_count << ")\n";

        // Drain the async sink between repeats so rounds don't interfere.
        logger->flush();
    }

    // Compute and display the aggregate statistics.
    PerfStats stats;
    stats.avg_throughput = calculate_mean(throughputs);
    stats.std_dev_throughput = calculate_std_dev(throughputs, stats.avg_throughput);

    if (!all_latencies.empty()) {
        stats.avg_latency = calculate_mean(all_latencies);
        stats.std_dev_latency = calculate_std_dev(all_latencies, stats.avg_latency);
        stats.p95_latency = calculate_percentile(all_latencies, 0.95);
        stats.p99_latency = calculate_percentile(all_latencies, 0.99);
    }

    std::cout << std::fixed << std::setprecision(2);
    std::cout << "  Avg LOG_INFO throughput: " << stats.avg_throughput << " msg/s (±" << stats.std_dev_throughput << ")" << std::endl;
    std::cout << "  Avg LOG_INFO latency: " << stats.avg_latency << " μs (±" << stats.std_dev_latency << ")" << std::endl;
    std::cout << "  P95 latency: " << stats.p95_latency << " μs" << std::endl;
    std::cout << "  P99 latency: " << stats.p99_latency << " μs" << std::endl;

    return stats;
}


// === Main Test Function ===
// Runs the full benchmark matrix (formatter x thread count x message size)
// and prints a summary table. Returns EXIT_FAILURE on any uncaught exception.
int main() {
    try {
        std::filesystem::create_directories("logs");
        std::vector<TestResult> all_results;
        // Total LOG_INFO calls per configuration, split evenly across threads.
        // Taken from TestConfig instead of re-hardcoding 1'000'000 here.
        const size_t total_messages = TestConfig::default_message_count;

        // --- Configuration 1: BasicFormatter ---
        {
            std::cout << "\n--- Setting up for BasicFormatter test ---\n";
            auto logger_basic = LoggerBuilder()
                                    .withBasicFormat()
                                    .withFileSink("logs/perf_basic.log", 1024*1024*100, 1)
                                    .build();

            for (size_t thread_count : TestConfig::thread_counts) {
                for (size_t msg_size : TestConfig::message_sizes) {
                    all_results.push_back({
                        thread_count,
                        msg_size,
                        perform_test(logger_basic, total_messages / thread_count, thread_count, msg_size),
                        "Basic"
                    });
                }
            }
        }


        // --- Configuration 2: JsonFormatter ---
        {
            std::cout << "\n--- Setting up for JsonFormatter tests ---\n";
            auto logger_json = LoggerBuilder()
                                    .withJsonFormat()
                                    .withFileSink("logs/perf_json.log", 1024*1024*100, 1)
                                    .build();

            // Sweep the same TestConfig tables as the Basic run — the previous
            // literal lists {1,2,4,8,16} / {64,128,256} duplicated the config
            // and could silently drift out of sync with it.
            for (size_t thread_count : TestConfig::thread_counts) {
                for (size_t msg_size : TestConfig::message_sizes) {
                    all_results.push_back({
                        thread_count,
                        msg_size,
                        perform_test(logger_json, total_messages / thread_count, thread_count, msg_size),
                        "JSON"
                    });
                }
            }
        }

        // Render the aggregated results table.
        print_results(all_results);

        return EXIT_SUCCESS;
    } catch (const std::exception &e) {
        std::cerr << "Test failed: " << e.what() << std::endl;
        return EXIT_FAILURE;
    }
}
