#include "stream_handler.h"

#include <algorithm>  // std::max
#include <cstdint>
#include <cstdlib>    // getenv, strtoul, strtol
#include <ctime>
#include <iomanip>
#include <iostream>
#include <sstream>

#include <libpq-fe.h>  // For PostgreSQL execution

// Construct the handler: read tuning knobs from the environment and start
// the background monitoring thread.
//
// KWDB_BATCH_SIZE        - points accumulated before a flush (default 1000)
// KWDB_FLUSH_INTERVAL_MS - writer wake-up interval in ms (default 100)
//
// Parsing is defensive: std::stoul/std::stoi would throw std::invalid_argument
// on garbage input (and stoul would silently accept 0, making the batch
// threshold degenerate), so malformed or non-positive values fall back to the
// defaults instead of aborting startup.
StreamHandler::StreamHandler() {
    const char* batch_size_env = getenv("KWDB_BATCH_SIZE");
    const char* flush_interval_env = getenv("KWDB_FLUSH_INTERVAL_MS");

    batch_size_ = 1000;
    flush_interval_ms_ = 100;

    if (batch_size_env) {
        char* end = nullptr;
        unsigned long v = strtoul(batch_size_env, &end, 10);
        // Accept only a fully-consumed, strictly positive number.
        if (end != batch_size_env && *end == '\0' && v > 0) {
            batch_size_ = v;
        }
    }
    if (flush_interval_env) {
        char* end = nullptr;
        long v = strtol(flush_interval_env, &end, 10);
        if (end != flush_interval_env && *end == '\0' && v > 0) {
            flush_interval_ms_ = static_cast<int>(v);
        }
    }

    // Background thread that prints per-second throughput statistics.
    monitor_thread_ = std::make_unique<std::thread>(&StreamHandler::MonitorLoop, this);

    std::cout << "[StreamHandler] Initialized with batch_size=" << batch_size_
              << ", flush_interval=" << flush_interval_ms_ << "ms" << std::endl;
}

// Tear down: signal background loops to stop, wake anything parked on the
// queue condition variable (otherwise WriterLoop only notices shutdown when
// its wait_for times out), then join the monitor thread.
StreamHandler::~StreamHandler() {
    running_ = false;
    queue_cv_.notify_all();  // prompt wake-up instead of waiting out the flush interval
    if (monitor_thread_ && monitor_thread_->joinable()) {
        monitor_thread_->join();
    }
}

// Single request-response (maintains compatibility)
Status StreamHandler::WriteBatch(ServerContext* context,
                                 const WriteRequest* request,
                                 WriteResponse* response) {
    std::vector<Point> batch;
    for (const auto& point : request->points()) {
        batch.push_back(point);
    }

    FlushBatch(batch);

    response->set_code(0);
    response->set_rows_written(batch.size());
    response->set_message("Batch written successfully");
    return Status::OK;
}

// Client-side streaming (core performance optimization)
Status StreamHandler::WriteStream(ServerContext* context,
                                  ServerReader<WriteRequest>* reader,
                                  WriteResponse* response) {
    WriteRequest request;
    std::vector<Point> batch;
    batch.reserve(batch_size_);

    uint64_t total_points = 0;
    auto start_time = std::chrono::steady_clock::now();

    // Stream reception with batch processing
    while (reader->Read(&request)) {
        for (const auto& point : request.points()) {
            batch.push_back(point);

            // Flush when threshold is reached
            if (batch.size() >= batch_size_) {
                FlushBatch(batch);
                total_points += batch.size();
                batch.clear();
                batch.reserve(batch_size_);
            }
        }
    }

    // Process remaining data
    if (!batch.empty()) {
        FlushBatch(batch);
        total_points += batch.size();
    }

    auto end_time = std::chrono::steady_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();

    response->set_code(0);
    response->set_rows_written(total_points);
    response->set_message("Stream processed in " + std::to_string(duration) + "ms");

    std::cout << "[WriteStream] Processed " << total_points
              << " points in " << duration << "ms ("
              << (total_points * 1000 / std::max(duration, 1L)) << " points/sec)"
              << std::endl;

    return Status::OK;
}

// Columnar batch processing (reduces serialization overhead)
Status StreamHandler::WriteColumnar(ServerContext* context,
                                    const kwdb::ingest::v1::ColumnarBatch* request,
                                    WriteResponse* response) {
    // Validate data integrity
    if (request->timestamps_ns_size() != request->row_count()) {
        response->set_code(1);
        response->set_message("Timestamp count mismatch");
        return Status::OK;
    }

    // Build SQL directly from columnar data
    std::stringstream sql;
    sql << "INSERT INTO " << QuoteIdentifier(request->measurement()) << " (" << QuoteIdentifier("ts");

    // Collect all column names
    std::vector<std::string> tag_names;
    std::vector<std::string> field_names;

    for (const auto& [tag_name, _] : request->tag_columns()) {
        sql << ", " << QuoteIdentifier(tag_name);
        tag_names.push_back(tag_name);
    }
    for (const auto& [field_name, _] : request->field_columns()) {
        sql << ", " << QuoteIdentifier(field_name);
        field_names.push_back(field_name);
    }
    sql << ") VALUES ";

    // Batch VALUES (columnar to row format conversion)
    for (uint32_t i = 0; i < request->row_count(); i++) {
        if (i > 0) sql << ", ";
        sql << "(" << QuoteLiteral(FormatTimestamp(request->timestamps_ns(i)));

        // Add tag values
        for (const auto& tag_name : tag_names) {
            auto it = request->tag_columns().find(tag_name);
            if (it != request->tag_columns().end() && i < it->second.values_size()) {
                sql << ", " << QuoteLiteral(it->second.values(i));
            } else {
                sql << ", NULL";
            }
        }

        // Add field values
        for (const auto& field_name : field_names) {
            auto it = request->field_columns().find(field_name);
            if (it != request->field_columns().end() && i < it->second.values_size()) {
                sql << ", " << it->second.values(i);
            } else {
                sql << ", NULL";
            }
        }
        sql << ")";
    }

    // Execute SQL
    std::string err;
    int rc = ExecuteSQL(sql.str(), &err);

    if (rc == 0) {
        response->set_code(0);
        response->set_rows_written(request->row_count());
        response->set_message("Columnar batch written successfully");
    } else {
        response->set_code(1);
        response->set_message("SQL error: " + err);
    }

    std::cout << "[WriteColumnar] Processed " << request->row_count()
              << " rows in columnar format" << std::endl;

    return Status::OK;
}

// Bidirectional streaming (periodic progress reporting)
Status StreamHandler::WriteBidirectional(ServerContext* context,
                                         ServerReaderWriter<kwdb::ingest::v1::WriteProgress, WriteRequest>* stream) {
    WriteRequest request;
    std::vector<Point> batch;
    batch.reserve(batch_size_);

    uint64_t total_received = 0;
    uint64_t total_written = 0;
    int64_t last_timestamp = 0;
    uint32_t progress_interval = 10; // Send progress every 10 batches
    uint32_t batch_count = 0;

    while (stream->Read(&request)) {
        for (const auto& point : request.points()) {
            batch.push_back(point);
            total_received++;
            last_timestamp = point.timestamp_unix_ns();

            if (batch.size() >= batch_size_) {
                FlushBatch(batch);
                total_written += batch.size();
                batch.clear();
                batch.reserve(batch_size_);
                batch_count++;

                // Send periodic progress
                if (batch_count % progress_interval == 0) {
                    kwdb::ingest::v1::WriteProgress progress;
                    progress.set_total_received(total_received);
                    progress.set_total_written(total_written);
                    progress.set_last_timestamp_ns(last_timestamp);
                    stream->Write(progress);

                    std::cout << "[WriteBidirectional] Progress: received="
                              << total_received << ", written=" << total_written << std::endl;
                }
            }
        }
    }

    // Process remaining data
    if (!batch.empty()) {
        FlushBatch(batch);
        total_written += batch.size();
    }

    // Send final progress
    kwdb::ingest::v1::WriteProgress final_progress;
    final_progress.set_total_received(total_received);
    final_progress.set_total_written(total_written);
    final_progress.set_last_timestamp_ns(last_timestamp);
    stream->Write(final_progress);

    std::cout << "[WriteBidirectional] Completed: total_written=" << total_written << std::endl;

    return Status::OK;
}

// Background batch writer thread
void StreamHandler::WriterLoop() {
    std::vector<Point> batch;
    batch.reserve(batch_size_);

    while (running_) {
        std::unique_lock<std::mutex> lock(queue_mutex_);

        // Wait for data or timeout
        queue_cv_.wait_for(lock, std::chrono::milliseconds(flush_interval_ms_),
                          [this]() { return !point_queue_.empty() || !running_; });

        // Collect batch data
        while (!point_queue_.empty() && batch.size() < batch_size_) {
            batch.push_back(point_queue_.front());
            point_queue_.pop();
        }

        lock.unlock();

        // Batch write
        if (!batch.empty()) {
            FlushBatch(batch);
            batch.clear();
        }
    }
}

// Flush batch to database
void StreamHandler::FlushBatch(std::vector<Point>& batch) {
    if (batch.empty()) return;

    // Update monitoring stats
    stats_.total_batches.fetch_add(1);
    stats_.total_points.fetch_add(batch.size());
    stats_.points_since_last_report.fetch_add(batch.size());
    stats_.queue_depth.store(point_queue_.size());

    // Use existing write_points_sql function
    WriteRequest request;
    for (const auto& point : batch) {
        *request.add_points() = point;
    }

    std::string err;
    long long rows = write_points_sql(request, &err);

    stats_.total_inserts.fetch_add(1);
    stats_.inserts_since_last_report.fetch_add(1);

    if (rows < 0) {
        std::cerr << "[StreamHandler] Write failed: " << err << std::endl;
    }
}

// Helper function to quote identifier (table/column names)
std::string StreamHandler::QuoteIdentifier(const std::string& s) {
    // Simple quoting for identifiers - wrap in double quotes and escape internal quotes
    std::string result = "\"";
    for (char c : s) {
        if (c == '"') result += "\"\"";
        else result += c;
    }
    result += "\"";
    return result;
}

// Helper function to quote literal (string values)
std::string StreamHandler::QuoteLiteral(const std::string& s) {
    // Simple quoting for literals - wrap in single quotes and escape internal quotes
    std::string result = "'";
    for (char c : s) {
        if (c == '\'') result += "''";
        else result += c;
    }
    result += "'";
    return result;
}

// Format timestamp (nanoseconds to SQL timestamp)
std::string StreamHandler::FormatTimestamp(int64_t ns) {
    auto seconds = ns / 1000000000;
    auto nanos = ns % 1000000000;

    std::time_t t = seconds;
    std::tm* tm = std::gmtime(&t);

    std::stringstream ss;
    ss << std::put_time(tm, "%Y-%m-%d %H:%M:%S");
    ss << "." << std::setfill('0') << std::setw(9) << nanos;
    return ss.str();
}

// Execute SQL (through write_points_sql or exec_sql function)
int StreamHandler::ExecuteSQL(const std::string& sql, std::string* err) {
    // Use libpq directly for SQL execution
    std::ostringstream ci;
    ci << "host=" << (getenv("KWDB_HOST") ? getenv("KWDB_HOST") : "127.0.0.1")
       << " port=" << (getenv("KWDB_SQL_PORT") ? getenv("KWDB_SQL_PORT") : "26257")
       << " dbname=" << (getenv("KWDB_DB") ? getenv("KWDB_DB") : "kwdb")
       << " user=" << (getenv("KWDB_USER") ? getenv("KWDB_USER") : "root")
       << " sslmode=disable";
    const char* pwd = getenv("KWDB_PWD");
    if (pwd && *pwd) ci << " password=" << pwd;

    PGconn* conn = PQconnectdb(ci.str().c_str());
    if (PQstatus(conn) != CONNECTION_OK) {
        if (err) *err = PQerrorMessage(conn);
        PQfinish(conn);
        return -1;
    }

    PGresult* result = PQexec(conn, sql.c_str());
    ExecStatusType status = PQresultStatus(result);

    if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK) {
        if (err) *err = PQerrorMessage(conn);
        PQclear(result);
        PQfinish(conn);
        return -1;
    }

    PQclear(result);
    PQfinish(conn);
    return 0;
}

// Monitoring loop - prints performance statistics every second
void StreamHandler::MonitorLoop() {
    while (running_) {
        std::this_thread::sleep_for(std::chrono::seconds(1));

        auto now = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::seconds>(
            now - stats_.last_report_time).count();

        if (duration >= 1) {
            // Calculate rates
            size_t points_rate = stats_.points_since_last_report / duration;
            size_t inserts_rate = stats_.inserts_since_last_report / duration;

            // Calculate averages
            size_t avg_batch_size = stats_.total_batches > 0 ?
                stats_.total_points / stats_.total_batches : 0;

            // Print report
            std::cout << "[MONITOR] QPS=" << points_rate
                      << " | Inserts/s=" << inserts_rate
                      << " | AvgBatch=" << avg_batch_size
                      << " | Queue=" << stats_.queue_depth
                      << " | Total=" << stats_.total_points
                      << std::endl;

            // Reset counters for next interval
            stats_.points_since_last_report.store(0);
            stats_.inserts_since_last_report.store(0);
            stats_.last_report_time = now;
        }
    }
}