package org.gbif.crawler;

import org.gbif.api.model.registry.Endpoint;

import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.Timer;
import com.yammer.metrics.core.TimerContext;

/**
 * This package-visible class consolidates all metrics collected by the {@link CrawlerCoordinatorServiceImpl} class.
 */
class CrawlerCoordinatorServiceMetrics {

  // Outcome counters for scheduling attempts.
  private final Counter successfulSchedules =
    Metrics.newCounter(CrawlerCoordinatorService.class, "successfulSchedules");
  private final Counter unsuccessfulSchedules =
    Metrics.newCounter(CrawlerCoordinatorService.class, "unsuccessfulSchedules");
  private final Counter alreadyScheduledDatasets =
    Metrics.newCounter(CrawlerCoordinatorService.class, "alreadyScheduledDatasets");
  private final Counter noValidEndpoints = Metrics.newCounter(CrawlerCoordinatorService.class, "noValidEndpoints");

  // Per-protocol counters, incremented once per crawl registered for that endpoint type.
  private final Counter digirRequests = Metrics.newCounter(CrawlerCoordinatorService.class, "digirRequests");
  private final Counter biocaseRequests = Metrics.newCounter(CrawlerCoordinatorService.class, "biocaseRequests");
  private final Counter tapirRequests = Metrics.newCounter(CrawlerCoordinatorService.class, "tapirRequests");

  /* These two fields are used to give a quick "status" to anyone interested. I modeled it after the Hadoop status
     messages which have proven to be useful in the past. This UUID is updated every time a new dataset UUID was
     successfully enqueued. */
  private final AtomicReference<UUID> lastUuid = new AtomicReference<UUID>();
  // Field only exists to keep a reference to the registered gauge; it is never read directly.
  private final Gauge<UUID> lastSuccessfulDatasetUuid =
    Metrics.newGauge(CrawlerCoordinatorService.class, "lastSuccessfulDatasetUuid", new Gauge<UUID>() {

      @Override
      public UUID value() {
        return lastUuid.get();
      }

    });

  private final Timer crawls =
    Metrics.newTimer(CrawlerCoordinatorService.class, "crawls", TimeUnit.MILLISECONDS, TimeUnit.HOURS);

  /* Context of the timing currently in flight, or null when none is running. Held in an AtomicReference so that
     timerStop() is safe (no NPE, no double stop) even if start/stop are invoked from different threads or in the
     wrong order. Note that only one timing can be in flight at a time; a second timerStart() replaces (and thereby
     discards) an unfinished one. */
  private final AtomicReference<TimerContext> timerContext = new AtomicReference<TimerContext>();

  /**
   * Records that a dataset was not scheduled because a crawl for it was already pending or running.
   */
  public void alreadyScheduled() {
    alreadyScheduledDatasets.inc();
  }

  /**
   * Records that a dataset could not be scheduled because none of its endpoints were crawlable.
   */
  public void noValidEndpoint() {
    noValidEndpoints.inc();
  }

  /**
   * Increments the protocol-specific request counter for the given endpoint.
   *
   * @param endpoint the endpoint a crawl was registered for; only DiGIR (incl. MANIS), TAPIR and BioCASe types are
   *                 counted, all other types are deliberately ignored
   */
  public void registerCrawl(Endpoint endpoint) {
    switch (endpoint.getType()) {
      case DIGIR:
      case DIGIR_MANIS:
        digirRequests.inc();
        break;
      case TAPIR:
        tapirRequests.inc();
        break;
      case BIOCASE:
        biocaseRequests.inc();
        break;
      default:
        // Other endpoint types have no dedicated counter and are intentionally not tracked here.
        break;
    }
  }

  /**
   * Records a successful scheduling of the given dataset and remembers its UUID for the status gauge.
   *
   * @param datasetUuid the dataset that was successfully enqueued
   */
  public void successfulSchedule(UUID datasetUuid) {
    lastUuid.set(datasetUuid);
    successfulSchedules.inc();
  }

  /**
   * Starts timing a crawl scheduling operation. A previously started but unfinished timing is discarded.
   */
  public void timerStart() {
    timerContext.set(crawls.time());
  }

  /**
   * Stops the currently running timing, if any. Safe to call without a prior {@link #timerStart()} and safe to call
   * more than once: in both cases the extra call is a no-op (the original version threw a NullPointerException when
   * stopped before being started).
   */
  public void timerStop() {
    TimerContext context = timerContext.getAndSet(null);
    if (context != null) {
      context.stop();
    }
  }

  /**
   * Records a failed scheduling attempt.
   */
  public void unsuccessfulSchedule() {
    unsuccessfulSchedules.inc();
  }

}
