package com.uhealin;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;

/**
 * MapReduce driver that scans an HBase input table with {@code TotalSaleMapper}
 * and writes aggregated results to an HBase output table via
 * {@code TotalSaleReducer}.
 *
 * <p>Usage: {@code TotalSaleDriver <input-table> <output-table>}
 */
public class TotalSaleDriver {

	 /**
	  * Configures and submits the "Sale Summary" job, blocking until completion.
	  *
	  * @param args args[0] = input HBase table name, args[1] = output HBase table name
	  * @throws IOException if the job finishes unsuccessfully
	  * @throws Exception   if job setup or submission fails
	  */
	 public static void main(String[] args) throws Exception{

		 // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
		 // when the caller supplies fewer than two arguments.
		 if (args.length < 2) {
			 System.err.println("Usage: TotalSaleDriver <input-table> <output-table>");
			 System.exit(2);
		 }

		 String inputtable=args[0],outputtable=args[1];

		 // Picks up hbase-site.xml / hbase-default.xml from the classpath.
		 Configuration conf = HBaseConfiguration.create();

		 Job job = Job.getInstance(conf, "Sale Summary");

		 job.setJarByClass(TotalSaleDriver.class);     // class that contains mapper and reducer

		 Scan scan = new Scan();
		 scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
		 scan.setCacheBlocks(false);  // don't set to true for MR jobs — avoids polluting the block cache
		 // set other scan attrs

		 TableMapReduceUtil.initTableMapperJob(
		 	inputtable,        // input table
		 	scan,               // Scan instance to control CF and attribute selection
		 	TotalSaleMapper.class,     // mapper class
		 	Text.class,         // mapper output key
		 	DoubleWritable.class,  // mapper output value
		 	job);
		 TableMapReduceUtil.initTableReducerJob(
		 	outputtable,        // output table
		 	TotalSaleReducer.class,    // reducer class
		 	job);
		 job.setNumReduceTasks(1);   // at least one, adjust as required

		 // Block until the job finishes; 'true' enables progress reporting on the console.
		 boolean b = job.waitForCompletion(true);
		 if (!b) {
		 	throw new IOException("error with job!");
		 }

	 }

}