#pragma once

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <new>
#include <numeric>
#include <string>
#include <vector>

#include <cuda_profiler_api.h>
#include <cuda_runtime.h>

/**
 * Utilities for Linear Layouts benchmarking
 * Provides timing, memory bandwidth, and performance analysis tools
 */

namespace benchmark_utils {

/**
 * High-resolution wall-clock timer for CPU-side benchmarking.
 *
 * Usage: start(), run the work, stop(), then read elapsed_*().
 * Reading while the timer is still running reports the time from
 * start() up to "now".
 */
class CPUTimer {
private:
    using Clock = std::chrono::high_resolution_clock;

    Clock::time_point begin_;
    Clock::time_point finish_;
    bool running_ = false;

public:
    // Marks the start of the measured interval.
    void start() {
        begin_ = Clock::now();
        running_ = true;
    }

    // Marks the end of the measured interval.
    void stop() {
        finish_ = Clock::now();
        running_ = false;
    }

    // Elapsed milliseconds; uses the current time as the endpoint when
    // stop() has not been called yet.
    double elapsed_ms() const {
        const Clock::time_point end_point = running_ ? Clock::now() : finish_;
        return std::chrono::duration<double, std::milli>(end_point - begin_).count();
    }

    // Elapsed microseconds (derived from elapsed_ms()).
    double elapsed_us() const {
        return elapsed_ms() * 1000.0;
    }

    // Elapsed nanoseconds (derived from elapsed_us()).
    double elapsed_ns() const {
        return elapsed_us() * 1000.0;
    }
};

/**
 * GPU timer using CUDA events
 */
class GPUTimer {
private:
    cudaEvent_t start_event, stop_event;
    bool events_created = false;
    bool is_running = false;

public:
    GPUTimer() {
        cudaEventCreate(&start_event);
        cudaEventCreate(&stop_event);
        events_created = true;
    }
    
    ~GPUTimer() {
        if (events_created) {
            cudaEventDestroy(start_event);
            cudaEventDestroy(stop_event);
        }
    }
    
    void start() {
        cudaEventRecord(start_event);
        is_running = true;
    }
    
    void stop() {
        cudaEventRecord(stop_event);
        cudaEventSynchronize(stop_event);
        is_running = false;
    }
    
    float elapsed_ms() const {
        float ms = 0;
        cudaEventElapsedTime(&ms, start_event, stop_event);
        return ms;
    }
    
    double elapsed_us() const {
        return static_cast<double>(elapsed_ms()) * 1000.0;
    }
    
    double elapsed_ns() const {
        return elapsed_us() * 1000.0;
    }
};

/**
 * Statistics calculator for benchmark results.
 *
 * Computes mean, median, population standard deviation (divides by N,
 * not N-1), minimum and maximum over a set of timing samples.
 * All fields are zeroed for empty input.
 */
struct BenchmarkStats {
    double mean;
    double median;
    double stddev;    // population standard deviation
    double min_val;
    double max_val;
    int samples;
    
    BenchmarkStats(const std::vector<double>& values) {
        if (values.empty()) {
            mean = median = stddev = min_val = max_val = 0.0;
            samples = 0;
            return;
        }
        
        // Explicit cast: values.size() is size_t, samples is int.
        samples = static_cast<int>(values.size());
        const double n = static_cast<double>(samples);
        
        // Mean
        mean = std::accumulate(values.begin(), values.end(), 0.0) / n;
        
        // Population standard deviation
        double variance = 0.0;
        for (double val : values) {
            const double delta = val - mean;
            variance += delta * delta;
        }
        stddev = std::sqrt(variance / n);
        
        // Median from a sorted copy
        std::vector<double> sorted_values(values);
        std::sort(sorted_values.begin(), sorted_values.end());
        if (samples % 2 == 0) {
            median = (sorted_values[samples/2 - 1] + sorted_values[samples/2]) / 2.0;
        } else {
            median = sorted_values[samples/2];
        }
        
        // The copy is already sorted, so the extremes are its endpoints —
        // no extra O(n) scans needed.
        min_val = sorted_values.front();
        max_val = sorted_values.back();
    }
};

/**
 * Memory bandwidth calculator.
 *
 * Converts a (bytes transferred, elapsed milliseconds) pair into
 * bandwidth figures. Note: a zero elapsed time yields +inf, matching
 * IEEE division semantics — callers should pass a measured, non-zero time.
 */
class BandwidthCalculator {
private:
    size_t bytes_transferred;
    double time_ms;

public:
    BandwidthCalculator(size_t bytes, double time_milliseconds) 
        : bytes_transferred(bytes), time_ms(time_milliseconds) {}
    
    // Achieved bandwidth in GiB per second.
    double gb_per_second() const {
        return (bytes_transferred / (1024.0 * 1024.0 * 1024.0)) / (time_ms / 1000.0);
    }
    
    // Fraction of a given peak bandwidth actually achieved (0..1).
    double effective_bandwidth_ratio(double peak_bandwidth_gb_s) const {
        return gb_per_second() / peak_bandwidth_gb_s;
    }
    
    // Bytes per element when num_elements > 0.
    //
    // BUG FIX / GENERALIZATION: the original implementation ignored the
    // element count entirely and returned bytes per SECOND despite its
    // name. For backward compatibility, calling with the default
    // (num_elements == 0) preserves that legacy bytes-per-second value.
    double bytes_per_element(size_t num_elements = 0) const {
        if (num_elements > 0) {
            return static_cast<double>(bytes_transferred) / static_cast<double>(num_elements);
        }
        // Legacy behavior: bytes per second.
        return static_cast<double>(bytes_transferred) / (time_ms / 1000.0);
    }
};

/**
 * GPU memory info helper.
 *
 * Snapshot of free/total/used device memory for the current device.
 */
struct GPUMemoryInfo {
    size_t free_bytes;
    size_t total_bytes;
    size_t used_bytes;
    
    // Queries the current device. On query failure every field is zero
    // instead of being left uninitialized (the original passed the
    // addresses straight to cudaMemGetInfo and ignored its status).
    static GPUMemoryInfo get_current() {
        GPUMemoryInfo info;
        info.free_bytes = 0;
        info.total_bytes = 0;
        if (cudaMemGetInfo(&info.free_bytes, &info.total_bytes) != cudaSuccess) {
            info.free_bytes = 0;
            info.total_bytes = 0;
        }
        info.used_bytes = info.total_bytes - info.free_bytes;
        return info;
    }
    
    double free_gb() const { return free_bytes / (1024.0 * 1024.0 * 1024.0); }
    double total_gb() const { return total_bytes / (1024.0 * 1024.0 * 1024.0); }
    double used_gb() const { return used_bytes / (1024.0 * 1024.0 * 1024.0); }
    // Guard the zero-total case (failed query) to avoid dividing by zero.
    double usage_percent() const {
        return total_bytes != 0 ? (used_bytes * 100.0) / total_bytes : 0.0;
    }
};

/**
 * Benchmark result formatter.
 *
 * Static helpers that print human-readable benchmark reports to stdout.
 */
class ResultFormatter {
public:
    // Banner introducing a named benchmark section.
    static void print_header(const std::string& benchmark_name) {
        const std::string rule(60, '=');
        std::cout << "\n" << rule << std::endl
                  << "BENCHMARK: " << benchmark_name << std::endl
                  << rule << std::endl;
    }
    
    // Summary of timing statistics; `unit` labels each figure.
    static void print_timing_results(const std::string& operation,
                                     const BenchmarkStats& stats,
                                     const std::string& unit = "ms") {
        std::cout << std::fixed << std::setprecision(3)
                  << operation << " Performance:" << std::endl
                  << "  Mean:   " << stats.mean << " " << unit << std::endl
                  << "  Median: " << stats.median << " " << unit << std::endl
                  << "  StdDev: " << stats.stddev << " " << unit << std::endl
                  << "  Range:  " << stats.min_val << " - " << stats.max_val << " " << unit << std::endl
                  << "  Samples:" << stats.samples << std::endl;
    }
    
    // Achieved bandwidth plus efficiency relative to peak (ratio in 0..1).
    static void print_bandwidth_results(const std::string& operation,
                                        double bandwidth_gb_s,
                                        double efficiency_ratio) {
        std::cout << std::fixed << std::setprecision(2)
                  << operation << " Bandwidth:" << std::endl
                  << "  Achieved: " << bandwidth_gb_s << " GB/s" << std::endl
                  << "  Efficiency: " << (efficiency_ratio * 100.0) << "%" << std::endl;
    }
    
    // Side-by-side timing comparison with a relative speedup figure.
    static void print_speedup_comparison(const std::string& baseline, double baseline_time,
                                         const std::string& optimized, double optimized_time) {
        const double speedup = baseline_time / optimized_time;
        std::cout << std::fixed << std::setprecision(2)
                  << "Speedup Analysis:" << std::endl
                  << "  " << baseline << ": " << baseline_time << " ms" << std::endl
                  << "  " << optimized << ": " << optimized_time << " ms" << std::endl
                  << "  Speedup: " << speedup << "x";
        if (speedup > 1.0) {
            std::cout << " (" << ((speedup - 1.0) * 100.0) << "% faster)";
        } else {
            std::cout << " (" << ((1.0 - speedup) * 100.0) << "% slower)";
        }
        std::cout << std::endl;
    }
};

/**
 * CUDA error checking helper.
 *
 * Wrap every CUDA runtime call: CHECK_CUDA_ERROR(cudaMalloc(&p, n));
 * On failure, prints file:line and the CUDA error string, then exits.
 *
 * Macro hygiene fixes vs. the original: the argument is expanded in
 * parentheses, and the local uses a reserved-style name so it cannot
 * shadow or collide with a caller variable named `err`.
 */
#define CHECK_CUDA_ERROR(call) do { \
    cudaError_t cuda_check_err_ = (call); \
    if (cuda_check_err_ != cudaSuccess) { \
        std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__ << " - " << cudaGetErrorString(cuda_check_err_) << std::endl; \
        exit(1); \
    } \
} while(0)

/**
 * Trivial kernel used only to spin the GPU up before timed runs.
 *
 * Launch layout: any 1-D grid (warmup_gpu() uses <<<10, 100>>>).
 * The computed value is deliberately unused; even if the compiler
 * eliminates the math, the launch itself still pays the one-time
 * startup costs we want out of the measurements.
 *
 * FIX: the original placed __syncthreads() inside `if (idx < 1000)`.
 * A barrier in divergent control flow is undefined behavior whenever
 * any thread of the block skips it; the barrier now sits outside the
 * branch so every thread reaches it.
 */
__global__ void warmup_kernel() {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    float x = 0.0f;
    if (idx < 1000) {
        // Simple computation to warm up the GPU.
        x = sinf(idx * 0.1f);
    }
    // Barrier reached by ALL threads of the block, divergent or not.
    __syncthreads();
    (void)x;  // intentionally unused
}

/**
 * Launches the warmup kernel and waits for it, so subsequent timed work
 * does not absorb one-time startup costs (context creation, clock ramp).
 *
 * FIX: the original checked neither the launch nor the synchronization;
 * both are now routed through CHECK_CUDA_ERROR.
 */
inline void warmup_gpu() {
    warmup_kernel<<<10, 100>>>();
    CHECK_CUDA_ERROR(cudaGetLastError());       // launch-configuration errors
    CHECK_CUDA_ERROR(cudaDeviceSynchronize());  // asynchronous execution errors
}

/**
 * Memory allocation helper with alignment.
 *
 * Allocates `num_elements` objects of T on the host, aligned to
 * `alignment` bytes. Free with free_aligned_host_memory().
 *
 * Preconditions (per POSIX): `alignment` must be a power of two and a
 * multiple of sizeof(void*). POSIX-only — posix_memalign is not
 * available on MSVC (use _aligned_malloc there).
 *
 * @throws std::bad_alloc on allocation failure or invalid alignment.
 */
template<typename T>
T* allocate_aligned_host_memory(size_t num_elements, size_t alignment = 32) {
    void* ptr = nullptr;
    // posix_memalign reports failure via its return value (EINVAL /
    // ENOMEM); it does not set errno and leaves ptr untouched on error.
    if (posix_memalign(&ptr, alignment, num_elements * sizeof(T)) != 0) {
        throw std::bad_alloc();
    }
    return static_cast<T*>(ptr);
}

// Allocates `num_elements` objects of T in device global memory.
// Exits via CHECK_CUDA_ERROR on failure; free with free_gpu_memory().
template<typename T>
T* allocate_gpu_memory(size_t num_elements) {
    T* device_ptr = nullptr;
    const size_t num_bytes = num_elements * sizeof(T);
    CHECK_CUDA_ERROR(cudaMalloc(&device_ptr, num_bytes));
    return device_ptr;
}

// Releases memory obtained from allocate_aligned_host_memory().
// posix_memalign allocations are released with plain free().
template<typename T>
void free_aligned_host_memory(T* host_ptr) {
    free(host_ptr);
}

// Releases device memory obtained from allocate_gpu_memory().
// cudaFree(nullptr) is a no-op, so a null check is unnecessary.
template<typename T>
void free_gpu_memory(T* device_ptr) {
    cudaFree(device_ptr);
}

/**
 * Benchmark runner template (CPU timing).
 *
 * Executes `func` for `warmup_runs` untimed iterations, then times
 * `benchmark_runs` iterations with CPUTimer and returns the aggregate
 * statistics in milliseconds.
 */
template<typename BenchmarkFunc>
BenchmarkStats run_benchmark(BenchmarkFunc func, int warmup_runs = 3, int benchmark_runs = 10) {
    // Untimed warmup iterations to stabilize caches and allocators.
    for (int run = 0; run < warmup_runs; ++run) {
        func();
    }
    
    std::vector<double> samples_ms;
    samples_ms.reserve(benchmark_runs);
    
    // Timed iterations, one sample each.
    for (int run = 0; run < benchmark_runs; ++run) {
        CPUTimer timer;
        timer.start();
        func();
        timer.stop();
        samples_ms.push_back(timer.elapsed_ms());
    }
    
    return BenchmarkStats(samples_ms);
}

/**
 * GPU benchmark runner template.
 *
 * Warms up the device, executes `func` for `warmup_runs` untimed
 * iterations (synchronizing after each so queued work cannot leak into
 * the timed phase), then times `benchmark_runs` iterations with GPUTimer
 * and returns the aggregate statistics in milliseconds.
 */
template<typename BenchmarkFunc>
BenchmarkStats run_gpu_benchmark(BenchmarkFunc func, int warmup_runs = 3, int benchmark_runs = 10) {
    warmup_gpu();
    
    // Untimed warmup iterations, fully drained before timing starts.
    for (int run = 0; run < warmup_runs; ++run) {
        func();
        cudaDeviceSynchronize();
    }
    
    std::vector<double> samples_ms;
    samples_ms.reserve(benchmark_runs);
    
    // Timed iterations; GPUTimer::stop() blocks until the stop event
    // completes, so each sample covers the device-side work of func().
    for (int run = 0; run < benchmark_runs; ++run) {
        GPUTimer timer;
        timer.start();
        func();
        timer.stop();
        samples_ms.push_back(timer.elapsed_ms());
    }
    
    return BenchmarkStats(samples_ms);
}

/**
 * Hardware information helper.
 *
 * Snapshot of the properties of CUDA device 0 that matter for
 * interpreting benchmark results.
 */
struct HardwareInfo {
    std::string device_name;
    int major, minor;
    size_t total_global_mem;
    int multiprocessor_count;
    int max_threads_per_block;
    int max_threads_per_multiprocessor;
    float memory_bandwidth_gb_s;  // estimated theoretical peak
    
    // Queries device 0 and fills in every field.
    static HardwareInfo get_current_device() {
        cudaDeviceProp prop;
        CHECK_CUDA_ERROR(cudaGetDeviceProperties(&prop, 0));
        
        HardwareInfo info;
        info.device_name = prop.name;
        info.major = prop.major;
        info.minor = prop.minor;
        info.total_global_mem = prop.totalGlobalMem;
        info.multiprocessor_count = prop.multiProcessorCount;
        info.max_threads_per_block = prop.maxThreadsPerBlock;
        info.max_threads_per_multiprocessor = prop.maxThreadsPerMultiProcessor;
        
        // Theoretical peak: bus width (bits -> bytes) * memory clock
        // (kHz) * 2 for double data rate, scaled to GB/s. Approximate.
        info.memory_bandwidth_gb_s =
            (prop.memoryBusWidth / 8.0f) * prop.memoryClockRate * 2.0f / 1e6f;
        
        return info;
    }
    
    // Pretty-prints the snapshot to stdout.
    void print_info() const {
        std::cout << "Hardware Information:" << std::endl
                  << "  Device: " << device_name << std::endl
                  << "  Compute Capability: " << major << "." << minor << std::endl
                  << "  Global Memory: " << (total_global_mem / (1024*1024*1024)) << " GB" << std::endl
                  << "  Multiprocessors: " << multiprocessor_count << std::endl
                  << "  Max Threads/Block: " << max_threads_per_block << std::endl
                  << "  Estimated Memory Bandwidth: " << memory_bandwidth_gb_s << " GB/s" << std::endl;
    }
};

/**
 * Profiling control helpers.
 *
 * Bracket a region of interest for an external profiler (e.g. a tool
 * started with profiling-from-start disabled): call start_profiling()
 * before the region and stop_profiling() after it.
 */
inline void start_profiling() {
    cudaProfilerStart();
}

inline void stop_profiling() {
    cudaProfilerStop();
}

} // namespace benchmark_utils