#include "kwdb/stream_client.h"

#include "ingest.grpc.pb.h"
#include "security_util.h"
#include "transport.h"

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

namespace kwdb {

using grpc::ClientContext;
using grpc::ClientWriter;
// Don't use grpc::Status to avoid conflict with kwdb::Status
using namespace kwdb::ingest::v1;

// Implementation class (PIMPL pattern)
// Implementation class (PIMPL pattern).
//
// Owns the gRPC channel/stub, the active client-streaming writer, and a
// point buffer that a background thread periodically flushes to the server.
class StreamClient::StreamClientImpl {
public:
    std::shared_ptr<grpc::Channel> channel_;
    std::unique_ptr<IngestService::Stub> stub_;
    std::unique_ptr<ClientContext> context_;
    std::unique_ptr<ClientWriter<WriteRequest>> writer_;
    WriteResponse response_;
    SecurityConfig security_config_;  // Stored so per-stream auth metadata can be injected later

    // Buffering for efficiency: points accumulate here and are sent in
    // batches by FlushBuffer(), either on a timer or when the buffer fills.
    std::vector<kwdb::Point> buffer_;
    std::mutex buffer_mutex_;   // Guards buffer_ only; non-recursive
    std::thread flush_thread_;
    std::atomic<bool> running_{true};

    // Builds the channel and stub. Endpoint resolution: "local" or empty maps
    // to a Unix domain socket (path overridable via KWDB_UDS_PATH); a bare
    // host:port gets a "dns:///" prefix; anything with a scheme passes through.
    StreamClientImpl(const IngestConfig& config)
        : security_config_(config.security) {
        // Auto-detect local mode
        std::string endpoint = config.ingest_endpoint;
        if (endpoint == "local" || endpoint.empty()) {
            const char* uds_path_env = getenv("KWDB_UDS_PATH");
            std::string uds_path = uds_path_env ? uds_path_env : "/tmp/kwdb-ingestd.sock";
            endpoint = "unix://" + uds_path;
        } else if (endpoint.find("://") == std::string::npos) {
            endpoint = "dns:///" + endpoint;
        }

        // Raise gRPC's default message size cap so large batches fit.
        grpc::ChannelArguments args;
        args.SetMaxReceiveMessageSize(100 * 1024 * 1024);
        args.SetMaxSendMessageSize(100 * 1024 * 1024);

        // Create channel credentials based on security configuration
        auto credentials = security::create_channel_credentials(config.security);

        channel_ = grpc::CreateCustomChannel(
            endpoint,
            credentials,
            args
        );

        stub_ = IngestService::NewStub(channel_);

        std::string security_info = config.security.tls ? " (TLS enabled)" : "";
        std::cout << "[StreamClient] Connected to " << endpoint << security_info << std::endl;
    }

    ~StreamClientImpl() {
        // Signal the flush thread to stop, then reap it.
        running_ = false;
        if (flush_thread_.joinable()) {
            flush_thread_.join();
        }
    }

    // Spawns the periodic flush thread. Sleeps in short slices rather than
    // one full interval so that shutdown (running_ = false) is observed
    // quickly instead of blocking the destructor for up to interval_ms.
    void StartFlushThread(int interval_ms) {
        flush_thread_ = std::thread([this, interval_ms]() {
            while (running_) {
                int waited_ms = 0;
                while (running_ && waited_ms < interval_ms) {
                    const int slice_ms = std::min(interval_ms - waited_ms, 50);
                    std::this_thread::sleep_for(std::chrono::milliseconds(slice_ms));
                    waited_ms += slice_ms;
                }
                if (!running_) {
                    break;  // Skip the final flush; Finish()/dtor handle draining
                }
                FlushBuffer();
            }
        });
    }

    // Sends all buffered points as a single WriteRequest.
    // NOTE: acquires buffer_mutex_ itself — callers must NOT hold the lock
    // (std::mutex is non-recursive). On a failed Write the points are kept
    // in the buffer for the next attempt.
    void FlushBuffer() {
        std::lock_guard<std::mutex> lock(buffer_mutex_);
        if (buffer_.empty() || !writer_) return;

        WriteRequest request;
        for (const auto& point : buffer_) {
            auto* proto_point = request.add_points();
            transport::convert_point(point, proto_point);
        }

        if (writer_->Write(request)) {
            buffer_.clear();
        }
    }
};

// Public interface implementation

// Constructs the client and eagerly opens the channel via StreamClientImpl;
// no stream exists until StartStream() is called.
StreamClient::StreamClient(const IngestConfig& config)
    : config_(config), impl_(std::make_unique<StreamClientImpl>(config)) {
}

StreamClient::~StreamClient() {
    // Best-effort teardown: if the caller never called Finish(), close the
    // stream here so buffered data is flushed and the RPC ends cleanly.
    if (!stream_active_) {
        return;
    }
    Finish();
}

// Opens the client-streaming RPC and starts the background flush thread.
// Safe to call again after Finish(): the previous flush thread is reaped
// and the run flag re-armed.
// Returns OK on success, or an error Status if a stream is already active
// or the writer could not be created.
Status StreamClient::StartStream() {
    if (stream_active_) {
        return Status(-1, "Stream already active");
    }

    impl_->context_ = std::make_unique<ClientContext>();

    // Set deadline if configured
    if (config_.timeout_ms > 0) {
        auto deadline = std::chrono::system_clock::now() +
                       std::chrono::milliseconds(config_.timeout_ms);
        impl_->context_->set_deadline(deadline);
    }

    // Add authentication metadata if configured
    security::add_auth_metadata(*impl_->context_, impl_->security_config_);

    // Create the stream
    impl_->writer_ = impl_->stub_->WriteStream(impl_->context_.get(), &impl_->response_);

    if (!impl_->writer_) {
        return Status(-1, "Failed to create stream");
    }

    // A previous Finish() sets running_ to false but may leave the old
    // flush thread joinable; reap it first — assigning a new thread to a
    // joinable std::thread calls std::terminate — then re-arm the flag so
    // the new flush loop actually runs.
    impl_->running_ = false;
    if (impl_->flush_thread_.joinable()) {
        impl_->flush_thread_.join();
    }
    impl_->running_ = true;

    // Start background flush thread
    impl_->StartFlushThread(flush_interval_ms_);

    stream_active_ = true;
    std::cout << "[StreamClient] Stream started successfully" << std::endl;

    return Status();  // Default constructor returns OK status
}

// Buffers a batch of points for the active stream, flushing when the
// buffer reaches batch_size_threshold_.
// Returns OK on success, or an error Status if no stream is active.
Status StreamClient::WriteBatch(const std::vector<kwdb::Point>& points) {
    if (!stream_active_) {
        return Status(-1, "Stream not active");
    }

    bool need_flush = false;
    {
        std::lock_guard<std::mutex> lock(impl_->buffer_mutex_);
        impl_->buffer_.insert(impl_->buffer_.end(), points.begin(), points.end());
        need_flush = impl_->buffer_.size() >= batch_size_threshold_;
    }

    // Flush OUTSIDE the lock: FlushBuffer() acquires buffer_mutex_ itself,
    // and std::mutex is non-recursive — calling it while holding the lock
    // (as the previous code did) deadlocks (undefined behavior).
    if (need_flush) {
        impl_->FlushBuffer();
    }

    total_written_ += points.size();
    return Status();  // OK status
}

// Convenience wrapper: writes a single point via the batch path.
Status StreamClient::WritePoint(const kwdb::Point& point) {
    const std::vector<kwdb::Point> single_point{point};
    return WriteBatch(single_point);
}

// Manually pushes any buffered points to the server immediately.
// Returns OK on success, or an error Status if no stream is active.
Status StreamClient::Flush() {
    if (stream_active_) {
        impl_->FlushBuffer();
        return Status();  // OK status
    }
    return Status(-1, "Stream not active");
}

// Drains the buffer, half-closes the stream, and waits for the server's
// final status.
// Returns OK on success, or an error Status if no stream is active or the
// RPC finished with a non-OK gRPC status.
Status StreamClient::Finish() {
    if (!stream_active_) {
        return Status(-1, "Stream not active");
    }

    // Stop and join the background flush thread BEFORE half-closing the
    // stream. The previous order (WritesDone() first, running_ = false
    // afterwards) let the flush thread race in and call Write() on a
    // half-closed stream.
    impl_->running_ = false;
    if (impl_->flush_thread_.joinable()) {
        impl_->flush_thread_.join();
    }

    // Flush any remaining data
    impl_->FlushBuffer();

    // Close the write side and collect the server's final status
    impl_->writer_->WritesDone();
    grpc::Status grpc_status = impl_->writer_->Finish();

    stream_active_ = false;

    if (!grpc_status.ok()) {
        return Status(-1, "Stream failed: " + grpc_status.error_message());
    }

    std::cout << "[StreamClient] Stream completed. Total written: "
              << impl_->response_.rows_written() << " rows" << std::endl;

    return Status();  // OK status
}

// Writes one columnar batch via a unary RPC (independent of the stream).
// All tag/field columns must have exactly timestamps.size() values.
// Returns OK on success, or an error Status on length mismatch or RPC failure.
Status StreamClient::WriteColumnar(const std::string& measurement,
                                   const std::vector<int64_t>& timestamps,
                                   const std::map<std::string, std::vector<std::string>>& tags,
                                   const std::map<std::string, std::vector<double>>& fields) {
    // Validate column lengths up front so we never send a ragged batch.
    const size_t row_count = timestamps.size();
    for (const auto& [tag_name, values] : tags) {
        if (values.size() != row_count) {
            return Status(-1, "Tag column '" + tag_name + "' length does not match timestamps");
        }
    }
    for (const auto& [field_name, values] : fields) {
        if (values.size() != row_count) {
            return Status(-1, "Field column '" + field_name + "' length does not match timestamps");
        }
    }

    // Create columnar batch request
    kwdb::ingest::v1::ColumnarBatch batch;
    batch.set_measurement(measurement);

    // Add timestamps
    for (int64_t ts : timestamps) {
        batch.add_timestamps_ns(ts);
    }
    batch.set_row_count(row_count);

    // Add tag columns
    for (const auto& [tag_name, values] : tags) {
        auto& arr = (*batch.mutable_tag_columns())[tag_name];
        for (const auto& val : values) {
            arr.add_values(val);
        }
    }

    // Add field columns
    for (const auto& [field_name, values] : fields) {
        auto& arr = (*batch.mutable_field_columns())[field_name];
        for (double val : values) {
            arr.add_values(val);
        }
    }

    // Send columnar batch
    // Note: WriteColumnar uses a single RPC rather than streaming because:
    // 1. The columnar format is already highly optimized for batch transfer
    // 2. Single RPC simplifies error handling and transactional semantics
    // 3. For truly massive datasets, users should batch into multiple columnar calls
    ClientContext context;

    // Apply the same deadline and auth metadata as StartStream() — the old
    // code sent this RPC with no credentials metadata, which fails against
    // a server that requires authentication.
    if (config_.timeout_ms > 0) {
        auto deadline = std::chrono::system_clock::now() +
                       std::chrono::milliseconds(config_.timeout_ms);
        context.set_deadline(deadline);
    }
    security::add_auth_metadata(context, impl_->security_config_);

    WriteResponse response;
    grpc::Status status = impl_->stub_->WriteColumnar(&context, batch, &response);

    if (!status.ok()) {
        return Status(-1, "Columnar write failed: " + status.error_message());
    }

    total_written_ += row_count;

    std::cout << "[StreamClient] Columnar batch written: " << timestamps.size() << " rows" << std::endl;

    return Status();  // OK status
}

} // namespace kwdb