package org.gbif.metrics.cube.occurrence.backfill;

import org.gbif.metrics.cube.HBaseCubes;
import org.gbif.metrics.cube.HBaseSourcedBackfill;
import org.gbif.metrics.cube.mapred.OccurrenceWritable;
import org.gbif.metrics.cube.occurrence.OccurrenceAddressUtil;
import org.gbif.metrics.cube.occurrence.OccurrenceCube;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import com.urbanairship.datacube.Batch;
import com.urbanairship.datacube.ops.LongOp;
import com.yammer.metrics.reporting.GangliaReporter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;


/**
 * Reducer that sums the occurrence counts emitted per {@link OccurrenceWritable} key and writes
 * the resulting cube mutations to HBase through a batching, asynchronous {@link HBaseCubes}
 * instance. One reducer instance owns one cube writer for its whole lifetime; the writer is
 * flushed and closed in {@link #cleanup(Context)}.
 */
public class CubeWriterReducer extends Reducer<OccurrenceWritable, IntWritable, NullWritable, NullWritable> {

  // Configuration keys for the Ganglia metrics reporter; defaults preserve the original
  // hard-coded target so existing jobs behave identically.
  public static final String KEY_GANGLIA_HOST = "ganglia.host";
  public static final String KEY_GANGLIA_PORT = "ganglia.port";
  private static final String DEFAULT_GANGLIA_HOST = "b5g2.gbif.org";
  private static final int DEFAULT_GANGLIA_PORT = 8649;

  // Batching async cube writer; created in setup(), closed in cleanup()
  private HBaseCubes<LongOp> cube;

  /**
   * Flushes and closes the cube writer so any buffered batch mutations are written. The close is
   * performed in a finally block so it runs even if {@code super.cleanup()} throws, and is guarded
   * against a null cube in case {@code setup()} failed before the writer was constructed
   * (otherwise an NPE here would mask the original setup failure).
   */
  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    try {
      super.cleanup(context);
    } finally {
      if (cube != null) {
        cube.close();
      }
    }
  }

  /**
   * Sums the counts for the key, converts the key into its set of cube address mutations and
   * queues them on the batching cube writer.
   *
   * @param key the dimensional coordinates of the count
   * @param values partial counts to aggregate
   * @throws IOException wrapping any failure from mutation building or the cube write, so the
   *         task fails (and is retried) rather than silently losing counts
   */
  @Override
  protected void reduce(OccurrenceWritable key, Iterable<IntWritable> values, final Context context)
    throws IOException,
    InterruptedException {
    // Aggregate the partial counts emitted by the mappers/combiner
    int total = 0;
    for (IntWritable i : values) {
      total += i.get();
    }
    try {
      // One logical increment fans out to a mutation per rollup the key addresses
      Batch<LongOp> update = OccurrenceAddressUtil.cubeMutation(key, new LongOp(total));

      context.setStatus("Handling occurrence dataset[" + key.getDatasetKey() + "], update[" + total + "], mutations["
        + update.getMap().size()
        + "]");

      context.getCounter("GBIF", "Cube mutations").increment(update.getMap().size());
      context.getCounter("GBIF", "Occurrence records processed").increment(total);
      cube.write(update);

    } catch (Exception e) {
      // Rethrow with cause preserved; Hadoop only declares IOException/InterruptedException here
      throw new IOException(e);
    }

  }

  /**
   * Creates the batching cube writer from job configuration and enables the Ganglia metrics
   * reporter (host and port are configurable, defaulting to the historical values).
   */
  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    Configuration conf = context.getConfiguration();
    int writeBatchSize =
      conf.getInt(HBaseSourcedBackfill.KEY_WRITE_BATCH_SIZE, HBaseSourcedBackfill.DEFAULT_WRITE_BATCH_SIZE);
    // this is the basic incrementing as each record increments the existing count per rollup
    // NOTE: Very importantly this is in BatchAsync to enable the writing in batches
    cube =
      HBaseCubes.newIncrementingBatchAsync(OccurrenceCube.INSTANCE, LongOp.DESERIALIZER,
        Bytes.toBytes(conf.get(HBaseSourcedBackfill.KEY_BACKFILL_TABLE)),
        Bytes.toBytes(conf.get(HBaseSourcedBackfill.KEY_CF)),
        conf, writeBatchSize);

    // Reporter target now comes from configuration; defaults keep the previous behavior
    GangliaReporter.enable(1, TimeUnit.MINUTES,
      conf.get(KEY_GANGLIA_HOST, DEFAULT_GANGLIA_HOST),
      conf.getInt(KEY_GANGLIA_PORT, DEFAULT_GANGLIA_PORT));
  }
}
