package eu.dicodeproject.analysis.histogram;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.mahout.common.IntegerTuple;

import java.io.IOException;
import java.text.FieldPosition;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Scans over some HBase table. Looks for pre-configured queries in each textCol. Queries are assigned integers, those
 * integers are used as first part of the output keys. Second part of the output keys correspond to bucket ids -
 * bucket ids are derived from the tweet creation date via the date pattern yyDDD. Output values are equal to one if a
 * match was found in the tweet text for the respective query/bucket id.
 */
public class HBaseHistogramMapper extends TableMapper<IntegerTuple, IntWritable> {
  /** Set of patterns with associated key for later output. */
  private final Map<Integer, Pattern> queries = new HashMap<Integer, Pattern>();

  /** Counters for rows that cannot be processed and are therefore skipped. */
  public static enum ErrorCases {TWEETNULL, DATENULL, DATEUNPARSEABLE};

  /** Parses tweet creation dates such as "Wed, 15 Dec 2010 09:39:23 +0000" (RFC 822 style). */
  private SimpleDateFormat dateFormat;
  /** Formats a date into the numeric bucket id: two-digit year followed by day of year (yyDDD). */
  private SimpleDateFormat outputFormat;

  /**
   * Reads the colon-separated query terms from the job configuration (key
   * {@code HistogramDriver.QUERY_TERMS}, default "dicode"), compiles each into a {@link Pattern}
   * keyed by its position, and initializes the date formats used to derive bucket ids.
   *
   * @param context the mapper context providing the job configuration
   * @throws IOException propagated from {@code super.setup}
   * @throws InterruptedException propagated from {@code super.setup}
   */
  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    String queryTerms = context.getConfiguration().get(HistogramDriver.QUERY_TERMS, "dicode");
    int i = 0;
    for (String queryTerm : queryTerms.split(":")) {
      this.queries.put(i, Pattern.compile(queryTerm));
      i++;
    }
    // Input dates look like "Wed, 15 Dec 2010 09:39:23 +0000". The trailing "+0000" is an
    // RFC 822 zone offset, so the correct pattern letter is "Z"; the previous "+SSS" (a literal
    // '+' followed by milliseconds) only parsed by accident under lenient mode. Locale.US pins
    // the English day/month names regardless of the cluster's default locale.
    this.dateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss Z", Locale.US);
    this.outputFormat = new SimpleDateFormat("yyDDD", Locale.US);
  }

  /**
   * Emits ((queryId, bucketId), 1) for the first configured query pattern found in the tweet
   * text. Rows with a missing tweet text, missing creation date, or unparseable creation date
   * are counted via {@link ErrorCases} and skipped.
   *
   * @param row the HBase row key (unused beyond framework plumbing)
   * @param values the HBase row; reads metaFamily:creationDate and textFamily:text
   * @param context the mapper context used for counters and output
   * @throws IOException propagated from {@code context.write}
   * @throws InterruptedException propagated from {@code context.write}
   */
  @Override
  protected void map(final ImmutableBytesWritable row, final Result values, final Context context)
      throws IOException, InterruptedException {
    // extract the data
    byte[] creationDateBytes = values.getValue(Bytes.toBytes("metaFamily"), Bytes.toBytes("creationDate"));
    byte[] tweetTextBytes = values.getValue(Bytes.toBytes("textFamily"), Bytes.toBytes("text"));

    // we anticipate errors as in non-existent tweetText or creationDate - those are ignored
    boolean failed = false;
    if (tweetTextBytes == null) {
      context.getCounter(ErrorCases.TWEETNULL).increment(1);
      failed = true;
    }
    if (creationDateBytes == null) {
      context.getCounter(ErrorCases.DATENULL).increment(1);
      failed = true;
    }
    if (failed) {
      return;
    }

    // Bytes.toString decodes as UTF-8, which is how HBase string cells are written;
    // new String(byte[]) would silently depend on the platform default charset.
    String creationDate = Bytes.toString(creationDateBytes);
    String tweetText = Bytes.toString(tweetTextBytes);

    for (Entry<Integer, Pattern> query : queries.entrySet()) {
      if (query.getValue().matcher(tweetText).find()) {
        // parse(String, ParsePosition) returns null instead of throwing on malformed input.
        Date date = this.dateFormat.parse(creationDate, new ParsePosition(0));
        if (date == null) {
          context.getCounter(ErrorCases.DATEUNPARSEABLE).increment(1);
          return;
        }
        Integer[] list = new Integer[2];
        list[0] = query.getKey();
        list[1] = Integer.parseInt(this.outputFormat.format(date));

        // for simplicity we generate keys based on the dateformat yyDDD - however on year leaps this leaves a gap
        // of 9635 days which does not display well. TODO fix in a more general way.
        if (list[1] > 11000) {
          list[1] -= 634;
        }

        context.write(new IntegerTuple(list), new IntWritable(1));
        // NOTE(review): only the FIRST matching query is emitted per tweet; confirm this is
        // intended rather than emitting one record per matching query.
        break;
      }
    }
  }

}
