package io.kwdb.sdk;

import io.grpc.ManagedChannel;
import io.kwdb.sdk.internal.Metrics;
import io.kwdb.sdk.internal.RetryPolicy;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * High-performance batch streaming client that integrates: - Automatic batching (Batcher) - Retry
 * logic with exponential backoff (RetryPolicy) - Performance metrics (Metrics)
 *
 * <p>This is the Phase-2 implementation matching C++ SDK capabilities.
 */
public class StreamingBatchClient {
  private static final Logger logger = Logger.getLogger(StreamingBatchClient.class.getName());

  private final StreamingClient streamingClient;
  private final Batcher<Point> batcher;
  private final RetryPolicy retryPolicy;
  private final Metrics metrics;
  // Wall-clock anchor for QPS computation (see getCurrentQPS()).
  private final long startTimeNs;
  // How long write() waits for queue space before reporting RESOURCE_EXHAUSTED.
  private final long offerTimeoutMs;
  private volatile boolean closed = false;

  /** Builder for StreamingBatchClient configuration. */
  public static class Builder {
    private final ManagedChannel channel;
    private int maxBatchSize = 1000;
    private long lingerMs = 100;
    private int maxQueueSize = 10000;
    private Config.Backpressure backpressureStrategy = Config.Backpressure.BLOCK;
    private boolean enableRetry = true;
    private int maxRetries = 3;
    private long baseBackoffMs = 100;
    private long offerTimeoutMs = 1000;

    public Builder(ManagedChannel channel) {
      this.channel = channel;
    }

    /** Maximum number of points per batch before a flush is triggered. */
    public Builder maxBatchSize(int maxBatchSize) {
      this.maxBatchSize = maxBatchSize;
      return this;
    }

    /**
     * Maximum time a partial batch may linger before being flushed. Must fit in an {@code int};
     * larger values cause {@link #build()} to throw {@link ArithmeticException}.
     */
    public Builder lingerMs(long lingerMs) {
      this.lingerMs = lingerMs;
      return this;
    }

    /** Maximum number of points queued before backpressure applies. */
    public Builder maxQueueSize(int maxQueueSize) {
      this.maxQueueSize = maxQueueSize;
      return this;
    }

    /** Strategy applied when the queue is full (e.g. block vs. drop). */
    public Builder backpressureStrategy(Config.Backpressure strategy) {
      this.backpressureStrategy = strategy;
      return this;
    }

    /** Timeout used by {@link StreamingBatchClient#write(Point)} when offering to the queue. */
    public Builder offerTimeoutMs(long offerTimeoutMs) {
      this.offerTimeoutMs = offerTimeoutMs;
      return this;
    }

    /** Enable or disable retry of failed batch writes. */
    public Builder enableRetry(boolean enableRetry) {
      this.enableRetry = enableRetry;
      return this;
    }

    /** Maximum retry attempts per batch when retry is enabled. */
    public Builder maxRetries(int maxRetries) {
      this.maxRetries = maxRetries;
      return this;
    }

    /** Base delay for the exponential backoff between retries. */
    public Builder baseBackoffMs(long baseBackoffMs) {
      this.baseBackoffMs = baseBackoffMs;
      return this;
    }

    public StreamingBatchClient build() {
      return new StreamingBatchClient(this);
    }
  }

  private StreamingBatchClient(Builder builder) {
    this.streamingClient = new StreamingClient(builder.channel);
    this.retryPolicy =
        new RetryPolicy(builder.enableRetry, builder.maxRetries, builder.baseBackoffMs);
    this.metrics = Metrics.getInstance();
    this.startTimeNs = System.nanoTime();
    this.offerTimeoutMs = builder.offerTimeoutMs;

    // Create batcher with flush callback (starts automatically).
    // Math.toIntExact fails fast on lingerMs > Integer.MAX_VALUE instead of the
    // silent (int) truncation, which could wrap to a bogus/negative linger time.
    this.batcher =
        new Batcher<>(
            builder.maxBatchSize,
            Math.toIntExact(builder.lingerMs),
            builder.maxQueueSize,
            builder.backpressureStrategy,
            this::flushBatchWithRetry);
  }

  /**
   * Create a builder for StreamingBatchClient.
   *
   * @param channel The gRPC channel to use
   * @return Builder instance
   */
  public static Builder builder(ManagedChannel channel) {
    return new Builder(channel);
  }

  /**
   * Start the streaming batch client. Must be called before writing any data.
   *
   * @return CompletableFuture that completes when client is ready; completes exceptionally if the
   *     underlying stream fails to start with a throwable
   */
  public CompletableFuture<Status> start() {
    // thenApply propagates both the status mapping and any upstream exception,
    // replacing the previous manual future + exceptionally relay with identical
    // observable behavior.
    return streamingClient
        .startStream()
        .thenApply(
            status -> {
              if (status.isOk()) {
                logger.log(Level.INFO, "StreamingBatchClient started successfully");
                return Status.ok();
              }
              logger.log(Level.WARNING, "Failed to start stream: {0}", status.getMessage());
              return status;
            });
  }

  /**
   * Write a single point. The point will be automatically batched.
   *
   * @param point Point to write
   * @return Status of the write operation; RESOURCE_EXHAUSTED if the queue stayed full for the
   *     configured offer timeout, FAILED_PRECONDITION if the client is closed
   */
  public Status write(Point point) {
    if (closed) {
      return Status.error(Status.Code.FAILED_PRECONDITION, "Client is closed");
    }

    try {
      boolean offered = batcher.offer(point, offerTimeoutMs);
      if (!offered) {
        // Queue never drained within offerTimeoutMs: count the drop and surface it.
        metrics.incrementDropped(1);
        return Status.error(Status.Code.RESOURCE_EXHAUSTED, "Queue full, point dropped");
      }
      return Status.ok();
    } catch (Exception e) {
      if (e instanceof InterruptedException) {
        // Preserve the caller's interrupt status; we report the failure via Status.
        Thread.currentThread().interrupt();
      }
      logger.log(Level.WARNING, "Error writing point", e);
      metrics.incrementWritesFailed(1, Status.Code.INTERNAL.getValue());
      return Status.error(Status.Code.INTERNAL, e.getMessage());
    }
  }

  /**
   * Flush all pending batches immediately.
   *
   * @return Status of the flush operation
   */
  public Status flush() {
    if (closed) {
      return Status.error(Status.Code.FAILED_PRECONDITION, "Client is closed");
    }

    try {
      batcher.flush();
      return Status.ok();
    } catch (Exception e) {
      logger.log(Level.WARNING, "Error flushing", e);
      return Status.error(Status.Code.INTERNAL, e.getMessage());
    }
  }

  /**
   * Close the client and wait for all pending data to be flushed. Note: This method doesn't
   * implement AutoCloseable for compatibility with Status return type.
   *
   * @return Status of the close operation
   */
  public Status close() {
    return close(30, TimeUnit.SECONDS);
  }

  /**
   * Close the client and wait for all pending data to be flushed with timeout. Idempotent: a
   * second call returns OK without doing any work.
   *
   * @param timeout Timeout value
   * @param unit Timeout unit
   * @return Status of the close operation
   */
  public Status close(long timeout, TimeUnit unit) {
    if (closed) {
      return Status.ok();
    }

    closed = true;

    try {
      // Close batcher (will flush remaining data)
      batcher.close();

      // Finish the stream
      Status finishStatus = streamingClient.finish(timeout, unit);

      logger.log(
          Level.INFO,
          "StreamingBatchClient closed. Total sent: {0}, Total written: {1}",
          new Object[] {
            streamingClient.getTotalPointsSent(), streamingClient.getTotalRowsWritten()
          });

      return finishStatus;

    } catch (Exception e) {
      if (e instanceof InterruptedException) {
        // Do not swallow interruption: restore the flag so callers can observe it.
        Thread.currentThread().interrupt();
      }
      logger.log(Level.WARNING, "Error closing client", e);
      return Status.error(Status.Code.INTERNAL, e.getMessage());
    }
  }

  /**
   * Get the current metrics snapshot.
   *
   * @return Metrics snapshot
   */
  public Metrics.MetricsSnapshot getMetrics() {
    return metrics.getSnapshot();
  }

  /**
   * Get the total number of points sent.
   *
   * @return Total points sent
   */
  public long getTotalPointsSent() {
    return streamingClient.getTotalPointsSent();
  }

  /**
   * Get the total number of rows written by the server.
   *
   * @return Total rows written
   */
  public long getTotalRowsWritten() {
    return streamingClient.getTotalRowsWritten();
  }

  /**
   * Get the current QPS (queries per second), computed as successful writes since client creation
   * divided by elapsed wall-clock seconds.
   *
   * @return Current QPS (0.0 if no time has elapsed)
   */
  public double getCurrentQPS() {
    long elapsedNs = System.nanoTime() - startTimeNs;
    double elapsedSec = elapsedNs / 1_000_000_000.0;
    return elapsedSec > 0 ? metrics.getSnapshot().writesOk / elapsedSec : 0.0;
  }

  /**
   * Get batcher statistics.
   *
   * @return Batcher statistics
   */
  public Batcher.BatcherStats getBatcherStats() {
    return batcher.getStats();
  }

  /**
   * Internal method to flush a batch with retry logic. Called by the Batcher when a batch is ready.
   * Records batch size, queue depth, latency, and success/failure metrics around the write.
   */
  private void flushBatchWithRetry(List<Point> batch) {
    if (batch.isEmpty()) {
      return;
    }

    long startTime = System.nanoTime();

    // Update metrics
    metrics.observeBatchSize(batch.size());
    metrics.updateQueueSize(batcher.getStats().currentQueueSize);

    // Execute with retry
    Status status =
        retryPolicy.execute(
            () -> {
              // Write batch to stream
              return streamingClient.writeBatch(batch);
            });

    long latencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);

    // Record metrics
    if (status.isOk()) {
      metrics.incrementWritesOk(batch.size());
      metrics.observeBatchLatency(latencyMs);
      // Attribute the batch latency to every point so per-write latency histograms
      // stay in sync with the per-write success counters.
      for (int i = 0; i < batch.size(); i++) {
        metrics.observeWriteLatency(latencyMs);
      }
    } else {
      metrics.incrementWritesFailed(batch.size(), status.getCode().getValue());
      logger.log(
          Level.WARNING,
          "Failed to write batch of {0} points: {1}",
          new Object[] {batch.size(), status.getMessage()});
    }
  }
}
