/**
 * 
 */
package org.gbif.occurrence.store.mapreduce;

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.gbif.occurrence.store.util.ConfigurableRecordReader;

/**
 * This is a tab file loader that will make use of parallelisation
 * inherent in a MapReduce job and load in records during the Map phase.
 * 
 * This is launched using a property file that describes the input tab delimited
 * file, and maps columns in the tab file to family:columns in HBase.
 * 
 * Records are given a generated UUID as the key.
 * 
 * Please pay attention to the following:
 * 
 * 1) It should be noted that at the time of writing, this is intended to seed the 
 * HBase quickly, and should not be run more than once, or else duplication 
 * will occur.  No "check if this record exists" is performed.
 * 
 * 2) This will load in the data defined in the provided mapping file.  Therefore there
 * is potential to create column families and columns on the HBase table as the data 
 * is loaded.  Pay particular attention to typing errors in the mapping file.  
 * 
 * 3) Everything is treated as a String.  
 * 
 * 4) No timestamps are set
 * 
 * @author tim
 */
public class DwCTabFileLoader {

	/**
	 * Does the actual loading of data into HBase; the target table and the
	 * column mapping file are read from the job configuration in setup().
	 * Nothing is emitted to the MapReduce framework - every input line is
	 * written directly to HBase as a single Put.
	 */
	public static class MapLoad extends Mapper<LongWritable, Text, Text, Text> {
		protected HTable table;
		protected HBaseConfiguration hbConf;
		protected ConfigurableRecordReader reader;

		@Override
		protected void setup(Context context) throws IOException,
				InterruptedException {
			super.setup(context);
			hbConf = new HBaseConfiguration();
			table = new HTable(hbConf, context.getConfiguration().get("table.name"));
			// ignore \N and use tab file format
			reader = new ConfigurableRecordReader(context.getConfiguration().get("input.mapping"), true, "\t");
		}

		@Override
		protected void cleanup(Context context) throws IOException,
				InterruptedException {
			// FIX: the table was never closed, leaking the HBase connection
			// and risking that buffered puts are never flushed
			if (table != null) {
				table.close();
				table = null;
			}
			super.cleanup(context);
		}

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			if (table == null) {
				throw new IOException("Table cannot be null.  This Mapper is not configured correctly.");
			}

			String[] splits = reader.split(value.toString());

			// consider a business unique, or UUID generated from a business unique?
			String rowID = UUID.randomUUID().toString();

			// FIX: use an explicit charset instead of the platform default.
			// UUID strings are ASCII so the bytes are identical, but this
			// removes the dependence on the task JVM's file.encoding.
			Put row = new Put(rowID.getBytes("UTF-8"));
			int fields = reader.readAllInto(splits, row);
			context.setStatus("Map updating cell for row[" + rowID + "] with " + fields + " fields");
			table.put(row);
		}
	}

	/**
	 * Configures and submits the map-only load job.
	 * 
	 * @param args <in> <out> <mapping-file> <table-name>
	 *        (<out> is accepted for interface stability but unused: the
	 *        mappers write straight to HBase and no file output is produced)
	 */
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

		// FIX: four arguments are required. The original tested for exactly 3
		// but then read otherArgs[3], so a "valid" invocation was guaranteed
		// to fail with ArrayIndexOutOfBoundsException.
		if (otherArgs.length != 4) {
			System.err.println("Usage: DwCTabFileLoader <in> <out> <mapping-file> <table-name> E.g.:");
			System.err.println("  DwCTabFileLoader input/myfile.text output/isNotUsed.txt /org/gbif/occurrencestore/dwc.mapping occurrence-record");
			System.exit(2);
		}
		Job job = new Job(conf, "DwCTabFileLoader");
		job.setJarByClass(DwCTabFileLoader.class);
		job.setMapperClass(MapLoad.class);

		// FIX: this is a map-only job (the mapper emits nothing), so run no
		// reducers and use NullOutputFormat - the default TextOutputFormat
		// rejects a job whose output path is not set.
		job.setNumReduceTasks(0);
		job.setOutputFormatClass(NullOutputFormat.class);

		FileInputFormat.addInputPath(job, new Path(otherArgs[0]));

		job.getConfiguration().set("input.mapping", otherArgs[2]);
		job.getConfiguration().set("table.name", otherArgs[3]);

		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
