package org.gbif.metrics.cli;

import org.gbif.common.messaging.MessageListener;
import org.gbif.common.messaging.api.MessageCallback;
import org.gbif.common.messaging.api.messages.OccurrencePersistedMessage;

import java.io.File;

import com.google.common.util.concurrent.AbstractIdleService;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static com.google.common.base.Preconditions.checkArgument;

/**
 * A base class for services that will mutate cubes from occurrences.
 * 
 * @param <T> The configuration type
 */
abstract class CubeUpdaterService<T extends CubeConfiguration> extends AbstractIdleService {

  private static final Logger LOG = LoggerFactory.getLogger(CubeUpdaterService.class);
  private final T configuration;
  // Held for the lifetime of the service. NOTE(review): no shutDown() override exists here,
  // so concrete subclasses are responsible for stopping the listener — confirm they do.
  private MessageListener listener;

  protected CubeUpdaterService(T configuration) {
    this.configuration = configuration;
  }

  /**
   * Supplies the callback that will process each {@link OccurrencePersistedMessage} received
   * from the queue.
   *
   * @param configuration the service configuration
   * @param hadoopConfiguration the HBase-aware Hadoop configuration built in {@link #startUp()}
   * @return the callback to register with the message listener
   */
  abstract MessageCallback<OccurrencePersistedMessage> getCallback(T configuration, Configuration hadoopConfiguration);

  @Override
  protected void startUp() throws Exception {
    File hbaseConfig = new File(configuration.hbaseConfig);
    // Include the offending path in the message so a misconfigured location is easy to diagnose.
    checkArgument(hbaseConfig.exists() && hbaseConfig.isFile(),
      "hbase-site.xml does not exist: %s", hbaseConfig.getAbsolutePath());

    Configuration conf = new Configuration();
    conf.addResource(hbaseConfig.toURI().toURL());

    // We are doing a huge amount of very tiny PUTs so reduce the buffer, as the default of 2MB means HBase
    // is being asked to process huge numbers of rows, and tasks start to timeout. These will likely hit
    // 1 region if the cube is initially empty.
    conf.set("hbase.client.write.buffer", "262144"); // 256k

    // Local only: the callback is handed to the listener and never needed again by this class.
    MessageCallback<OccurrencePersistedMessage> callback = getCallback(configuration, conf);

    LOG.info("Starting cube service with {} listeners merging batches of {} messages before writing",
      configuration.poolSize, configuration.writeBatchSize);

    listener = new MessageListener(configuration.messaging.getConnectionParameters());
    listener.listen(configuration.queueName, configuration.poolSize, callback);
  }
}
