#include "kwdb/batcher.h"
#include "kwdb/client.h"
#include "kwdb/metrics.h"
#include "kwdb/retry_policy.h"
#include "kwdb/stream_client.h"

#include <atomic>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

using namespace kwdb;
using namespace std::chrono;

// Benchmark configuration from command line
// Benchmark configuration, populated from command-line flags (see ParseArgs).
struct BenchConfig {
    // Write path: "stream" batches points through the streaming client;
    // any other value falls back to per-point unary writes.
    std::string mode{"stream"};
    size_t total_records{100000};  // Total points to write across all threads
    size_t batch_size{500};        // Maximum points per stream batch
    int concurrency{4};            // Number of concurrent writer threads
    int linger_ms{100};            // Batcher linger window, in milliseconds
    bool inject_delay{false};      // Periodically sleep to exercise retry logic
};

// Parses command-line flags into a BenchConfig.
//
// Fixes over the previous version:
//  - std::stoul / std::stoi used to throw uncaught std::invalid_argument /
//    std::out_of_range on malformed numeric values, terminating the process
//    with no usable diagnostic. Malformed values now print a clear error and
//    exit(1).
//  - Unrecognized (or value-less) options were silently ignored, hiding
//    typos; they now emit a warning to stderr.
//
// @param argc  argument count, as passed to main()
// @param argv  argument vector, as passed to main()
// @return      fully-populated BenchConfig (defaults where flags are absent)
BenchConfig ParseArgs(int argc, char** argv) {
    BenchConfig config;

    // Parse a size_t option value or exit with a diagnostic on bad input.
    auto parse_size = [](const char* opt, const char* text) -> size_t {
        try {
            return std::stoul(text);
        } catch (const std::exception&) {
            std::cerr << "Invalid value for " << opt << ": '" << text << "'"
                      << std::endl;
            std::exit(1);  // noreturn
        }
    };
    // Parse an int option value or exit with a diagnostic on bad input.
    auto parse_int = [](const char* opt, const char* text) -> int {
        try {
            return std::stoi(text);
        } catch (const std::exception&) {
            std::cerr << "Invalid value for " << opt << ": '" << text << "'"
                      << std::endl;
            std::exit(1);  // noreturn
        }
    };

    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "--mode") == 0 && i + 1 < argc) {
            config.mode = argv[++i];
        } else if (strcmp(argv[i], "--records") == 0 && i + 1 < argc) {
            config.total_records = parse_size("--records", argv[++i]);
        } else if (strcmp(argv[i], "--batch") == 0 && i + 1 < argc) {
            config.batch_size = parse_size("--batch", argv[++i]);
        } else if (strcmp(argv[i], "--concurrency") == 0 && i + 1 < argc) {
            config.concurrency = parse_int("--concurrency", argv[++i]);
        } else if (strcmp(argv[i], "--linger") == 0 && i + 1 < argc) {
            config.linger_ms = parse_int("--linger", argv[++i]);
        } else if (strcmp(argv[i], "--inject-delay") == 0) {
            config.inject_delay = true;
        } else if (strcmp(argv[i], "--help") == 0) {
            std::cout << "Usage: bench_write [options]\n"
                      << "  --mode <stream|unary>  Write mode (default: stream)\n"
                      << "  --records <n>          Total records to write (default: 100000)\n"
                      << "  --batch <n>            Batch size (default: 500)\n"
                      << "  --concurrency <n>      Number of concurrent writers (default: 4)\n"
                      << "  --linger <ms>          Linger time in ms (default: 100)\n"
                      << "  --inject-delay         Inject delays to test retry\n";
            exit(0);
        } else {
            // Previously silently ignored; warn so typos are visible.
            std::cerr << "Warning: ignoring unknown or incomplete option: "
                      << argv[i] << " (use --help)" << std::endl;
        }
    }

    return config;
}

// Worker body for one benchmark thread.
//
// Writes `num_points` points to the local ingest endpoint using one of:
//   * stream mode ("stream"): points go through a Batcher whose flush
//     callback writes whole batches via a StreamClient, with retry;
//   * unary mode (any other value): each point is written individually via
//     KwdbClient, with retry.
// Per-point success/failure and latency are recorded in GlobalMetrics; this
// is what main() checks against `total_records` afterwards.
//
// @param thread_id  index of this writer; used in measurement/tag names so
//                   each thread writes a distinct series
// @param num_points number of points this thread writes
// @param config     shared benchmark configuration (read-only)
void WriterThread(size_t thread_id, size_t num_points, const BenchConfig& config) {
    auto& metrics = GlobalMetrics::Instance();

    // Create client (only exercised on the unary path, but constructed
    // unconditionally).
    IngestConfig ingest_config;
    ingest_config.ingest_endpoint = "local";  // Use UDS for best performance
    auto client = std::make_unique<KwdbClient>(ingest_config);

    // Setup retry policy (shared by both the stream flush callback and the
    // unary path).
    RetryConfig retry_config;
    retry_config.max_retries = 3;
    RetryPolicy retry(retry_config);

    // Setup batcher for stream mode
    std::unique_ptr<StreamClient> stream_client;
    std::unique_ptr<Batcher> batcher;

    if (config.mode == "stream") {
        stream_client = std::make_unique<StreamClient>(ingest_config);

        // Start stream; on failure this thread writes nothing, which main()
        // will surface via the success-count check.
        Status status = stream_client->StartStream();
        if (!status.ok()) {
            std::cerr << "[Thread " << thread_id << "] Failed to start stream: "
                      << status.message << std::endl;
            return;
        }

        // Setup batcher
        BatcherConfig batcher_config;
        batcher_config.max_batch_size = config.batch_size;
        batcher_config.linger_ms = config.linger_ms;
        batcher_config.max_pending_batches = 10;

        // The flush callback captures locals (stream_client, retry, metrics)
        // by reference. This is safe only because batcher->Close() is called
        // before those locals go out of scope at the end of this function.
        batcher = std::make_unique<Batcher>(batcher_config,
            [&stream_client, &retry, &metrics](const std::vector<Point>& batch) -> Status {
                auto start = steady_clock::now();

                // Write with retry
                Status status = retry.Execute([&]() {
                    return stream_client->WriteBatch(batch);
                });

                // Latency is measured once per batch; every point in the
                // batch is recorded with that same (batch-level) latency.
                auto latency = duration_cast<microseconds>(steady_clock::now() - start);

                if (status.ok()) {
                    // Record success for each point in the batch
                    for (size_t i = 0; i < batch.size(); i++) {
                        metrics.RecordSuccess(latency);
                    }
                } else {
                    // Record failure for each point in the batch
                    for (size_t i = 0; i < batch.size(); i++) {
                        metrics.RecordFailure();
                    }
                }

                return status;
            });
    }

    // Generate and write points. Each thread writes its own measurement so
    // series do not collide across threads.
    std::string measurement = "bench_test_" + std::to_string(thread_id);

    for (size_t i = 0; i < num_points; i++) {
        Point point(measurement);
        point.add_tag("thread", std::to_string(thread_id))
             .add_tag("host", "bench_host_" + std::to_string(i % 10))
             .add_field("value", i * 1.5)
             .add_field("count", static_cast<double>(i))
             .timestamp_now();

        if (config.mode == "stream" && batcher) {
            // Use batcher for streaming. Any result of Add() is discarded —
            // assumes the batcher handles backpressure internally via
            // max_pending_batches; TODO confirm against Batcher's contract.
            batcher->Add(point);
        } else {
            // Direct write for unary mode
            auto start = steady_clock::now();

            Status status = retry.Execute([&]() {
                return client->batch_write({point});
            });

            auto latency = duration_cast<microseconds>(steady_clock::now() - start);

            if (status.ok()) {
                metrics.RecordSuccess(latency);
            } else {
                metrics.RecordFailure();
            }
        }

        // Inject delay if requested (to test retry): pause 200ms every
        // 1000 points so server-side idle timeouts / retries get exercised.
        if (config.inject_delay && i % 1000 == 0) {
            std::this_thread::sleep_for(milliseconds(200));
        }
    }

    // Cleanup. Close() the batcher first — presumably it flushes any pending
    // batch (and stops invoking the flush callback) before the stream is
    // finished; TODO confirm Close()/Finish() ordering guarantees.
    if (batcher) {
        batcher->Close();
    }
    if (stream_client) {
        stream_client->Finish();
    }

    std::cout << "[Thread " << thread_id << "] Completed " << num_points << " points" << std::endl;
}

// Entry point: parse configuration, fan out writer threads, then report
// throughput and verify that every record was written successfully.
//
// Fixes over the previous version:
//  - `--concurrency 0` (or a negative value) previously caused an integer
//    division by zero at the points-per-thread computation (SIGFPE crash);
//    it is now rejected up front.
//  - QPS no longer divides by zero when the run finishes in under 1 ms.
//  - C-style cast replaced with static_cast.
int main(int argc, char** argv) {
    BenchConfig config = ParseArgs(argc, argv);

    // Guard: concurrency is used below as both a divisor and a thread count.
    if (config.concurrency < 1) {
        std::cerr << "Error: --concurrency must be >= 1 (got "
                  << config.concurrency << ")" << std::endl;
        return 1;
    }

    std::cout << "========================================" << std::endl;
    std::cout << "        KWDB Write Benchmark" << std::endl;
    std::cout << "========================================" << std::endl;
    std::cout << "Mode:        " << config.mode << std::endl;
    std::cout << "Records:     " << config.total_records << std::endl;
    std::cout << "Batch Size:  " << config.batch_size << std::endl;
    std::cout << "Concurrency: " << config.concurrency << std::endl;
    std::cout << "Linger MS:   " << config.linger_ms << std::endl;
    std::cout << "========================================" << std::endl;

    // Reset metrics so the final success-count check reflects this run only.
    GlobalMetrics::Instance().Reset();

    // Distribute records evenly; thread 0 absorbs the remainder.
    size_t points_per_thread = config.total_records / config.concurrency;
    size_t remaining = config.total_records % config.concurrency;

    // Start timing
    auto start_time = steady_clock::now();

    // Launch worker threads
    std::vector<std::thread> threads;
    threads.reserve(config.concurrency);
    for (int i = 0; i < config.concurrency; i++) {
        size_t points = points_per_thread;
        if (i == 0) points += remaining;  // First thread handles remainder

        threads.emplace_back(WriterThread, i, points, config);
    }

    // Wait for all threads
    for (auto& t : threads) {
        t.join();
    }

    // Calculate elapsed time
    auto elapsed = duration_cast<milliseconds>(steady_clock::now() - start_time);

    // Calculate QPS; guard against a sub-millisecond run (elapsed == 0).
    double elapsed_sec = static_cast<double>(elapsed.count()) / 1000.0;
    double qps = elapsed_sec > 0.0
        ? static_cast<double>(config.total_records) / elapsed_sec
        : 0.0;

    std::cout << "\n========================================" << std::endl;
    std::cout << "         Benchmark Results" << std::endl;
    std::cout << "========================================" << std::endl;
    std::cout << "Total Time:     " << elapsed.count() << " ms" << std::endl;
    std::cout << "QPS:            " << std::fixed << std::setprecision(0) << qps << std::endl;
    // Note: this is the base share; thread 0 additionally handles the
    // remainder when total_records is not divisible by concurrency.
    std::cout << "Records/Thread: " << points_per_thread << std::endl;

    // Print metrics
    GlobalMetrics::Instance().PrintReport();

    // Verify success: every record must have been acknowledged.
    auto& metrics = GlobalMetrics::Instance();
    if (metrics.GetSuccessCount() != config.total_records) {
        std::cerr << "\n❌ FAILED: Expected " << config.total_records
                  << " successful writes, got " << metrics.GetSuccessCount() << std::endl;
        return 1;
    }

    // Expected batch count (approximate due to linger)
    size_t expected_batches = (config.total_records + config.batch_size - 1) / config.batch_size;
    std::cout << "\nExpected Batches: ~" << expected_batches << std::endl;

    std::cout << "\n✅ Benchmark completed successfully!" << std::endl;
    return 0;
}