package org.gbif.metrics.cube.occurrence.backfill;

import org.gbif.metrics.cube.HBaseCubes;
import org.gbif.metrics.cube.HBaseSourcedBackfill;
import org.gbif.metrics.cube.occurrence.OccurrenceCube;
import org.gbif.occurrencestore.api.model.constants.FieldName;
import org.gbif.occurrencestore.persistence.OccurrenceResultReader;

import java.io.IOException;
import java.util.List;
import java.util.Map.Entry;

import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.urbanairship.datacube.Dimension;
import com.urbanairship.datacube.DimensionAndBucketType;
import com.urbanairship.datacube.WriteBuilder;
import com.urbanairship.datacube.ops.LongOp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;


/**
 * Reads occurrence records from an HBase scan and writes one increment per record into the
 * occurrence cube, addressing only the dimensions that are actually present on the record.
 * The cube connection is opened in {@link #setup(Context)} and flushed/closed in
 * {@link #cleanup(Context)}. Emits no map output; all writes go directly to HBase.
 */
public class CubeWriterMapper extends TableMapper<NullWritable, NullWritable> {

  // Joiner is immutable and thread-safe; cached so it is not rebuilt for every mapped record
  private static final Joiner COMMA_JOINER = Joiner.on(",");

  // batching, asynchronous cube writer; initialized in setup(), closed in cleanup()
  private HBaseCubes<LongOp> hbaseCube;

  /**
   * Adds the integer-valued dimension to the write builder only when the value is present.
   *
   * @param wb the builder being populated for the current record
   * @param i the dimension coordinate, or null when absent on the record
   * @param dim the cube dimension to address
   * @return the builder, addressed at the dimension when {@code i} was non-null
   */
  private WriteBuilder addIntDimension(WriteBuilder wb, Integer i, Dimension<Integer> dim) {
    if (i != null) {
      wb = wb.at(dim, i);
    }
    return wb;
  }

  /**
   * Adds the string-valued dimension to the write builder only when the value is present
   * and non-empty.
   *
   * @param wb the builder being populated for the current record
   * @param s the dimension coordinate, or null/empty when absent on the record
   * @param dim the cube dimension to address
   * @return the builder, addressed at the dimension when {@code s} held content
   */
  private WriteBuilder addStringDimension(WriteBuilder wb, String s, Dimension<String> dim) {
    if (!Strings.isNullOrEmpty(s)) {
      wb = wb.at(dim, s);
    }
    return wb;
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    super.cleanup(context);
    // flushes any buffered batch writes before the task finishes
    hbaseCube.close();
  }

  /**
   * Maps a single occurrence row to a count-of-one increment in the cube, addressed at every
   * dimension the row carries a value for. Any failure is rethrown as IOException so the
   * framework fails (and potentially retries) the task.
   */
  @Override
  protected void map(ImmutableBytesWritable key, Result row, Context context) throws IOException, InterruptedException {
    try {
      WriteBuilder wb = new WriteBuilder(OccurrenceCube.INSTANCE);
      wb = addStringDimension(wb, OccurrenceResultReader.getString(row, FieldName.I_KINGDOM), OccurrenceCube.KINGDOM);
      wb = addStringDimension(wb, OccurrenceResultReader.getString(row, FieldName.I_BASIS_OF_RECORD), OccurrenceCube.BASIS_OF_RECORD);
      wb = addStringDimension(wb, OccurrenceResultReader.getString(row, FieldName.I_ISO_COUNTRY_CODE), OccurrenceCube.COUNTRY_ISO);
      wb = addStringDimension(wb, OccurrenceResultReader.getString(row, FieldName.DATASET_KEY), OccurrenceCube.DATASET_KEY);
      wb = addIntDimension(wb, OccurrenceResultReader.getInteger(row, FieldName.I_YEAR), OccurrenceCube.YEAR);
      wb = addIntDimension(wb, OccurrenceResultReader.getInteger(row, FieldName.I_MONTH), OccurrenceCube.MONTH);

      // a record is considered georeferenced only when both coordinates are present
      Double latitude = OccurrenceResultReader.getDouble(row, FieldName.I_LATITUDE);
      Double longitude = OccurrenceResultReader.getDouble(row, FieldName.I_LONGITUDE);
      wb = wb.at(OccurrenceCube.IS_GEOREFERENCED, latitude != null && longitude != null);

      wb = addIntDimension(wb, OccurrenceResultReader.getInteger(row, FieldName.I_NUB_ID), OccurrenceCube.TAXON_ID);

      // display the dimensions that have been set in this pass, to aid monitoring
      List<String> dims = Lists.newArrayList();
      for (Entry<DimensionAndBucketType, byte[]> e : wb.getBuckets().entrySet()) {
        dims.add(e.getKey().dimension.getName());
      }
      context.setStatus(COMMA_JOINER.join(dims));

      hbaseCube.write(new LongOp(1), wb);

    } catch (Exception e) {
      // rethrow with cause preserved so the MR framework records the failure against this task
      throw new IOException(e);
    }
  }

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    Configuration conf = context.getConfiguration();
    int writeBatchSize = conf.getInt(HBaseSourcedBackfill.KEY_WRITE_BATCH_SIZE, HBaseSourcedBackfill.DEFAULT_WRITE_BATCH_SIZE);
    // this is the basic incrementing as each record increments the existing count per rollup
    hbaseCube =
      HBaseCubes.newIncrementingBatchAsync(OccurrenceCube.INSTANCE, LongOp.DESERIALIZER,
        Bytes.toBytes(conf.get(HBaseSourcedBackfill.KEY_BACKFILL_TABLE)), Bytes.toBytes(conf.get(HBaseSourcedBackfill.KEY_LOOKUP_TABLE)),
        Bytes.toBytes(conf.get(HBaseSourcedBackfill.KEY_COUNTER_TABLE)), Bytes.toBytes(conf.get(HBaseSourcedBackfill.KEY_CF)), conf, writeBatchSize);
  }
}
