package index;
import mapreduce.WorkCount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapreduce.Job;

import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.streaming.StreamInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * @author Guimin Lin
 * @date Feb 17, 2011
 */
/**
 * Driver ({@link Tool}) for the inverted-index MapReduce job.
 *
 * <p>Uses the old {@code org.apache.hadoop.mapred} API. Input is read through
 * {@link StreamInputFormat} configured with {@code StreamXmlRecordReader} so that each
 * map record is one whole {@code <DOC>...</DOC>} XML document. Output is plain text,
 * partitioned into shards by {@code IndexPartitioner} and reduced by {@code IndexReducer}.
 */
public class Index extends Configured implements Tool {

	// NOTE(review): "SEPERATOR" is a misspelling of "SEPARATOR", but it is a public
	// constant referenced by the mapper/reducer/partitioner classes, so the name is
	// kept for compatibility.
	/** Separates a term from its document/posting information in emitted keys. */
	public static final String SEPERATOR = "@";
	/** Separates a term from its term-frequency count. */
	public static final String TF_SYM = ":";
	/** Joins compound tokens in emitted keys. */
	public static final String CONNECT_SYM = "_";
	/** Separates a term from its document-frequency count. */
	public static final String DF_SYM = ";";
	// 0x800000 = 8,388,608 chars; at 2 bytes per Java char this is 16 MB of text.
	/** Maximum buffered output size (in chars) before the combiner flushes. */
	public static final int COMBINER_MAX_SIZE = 0x800000;
	/** Maximum buffered output size (in chars) before the reducer flushes. */
	public static final int REDUCE_MAX_SIZE = 0x800000;

	/** Configuration property (int) overriding the number of reduce tasks / shards. */
	public static final String NUM_SHARDS_PROPERTY = "index.num.shards";
	/** Default number of reduce tasks (= number of index shards). */
	public static final int DEFAULT_NUM_SHARDS = 5;

	/** When true, {@link #printToErr(String)} writes diagnostics to stderr. */
	public static boolean isDebugMode = false;

	/**
	 * Writes {@code message} to standard error, but only when debug mode is enabled.
	 *
	 * @param message the diagnostic text to print
	 */
	public static void printToErr(String message) {
		if (isDebugMode) {
			System.err.println(message);
		}
	}

	/**
	 * Entry point. Delegates to {@link ToolRunner} so that generic Hadoop options
	 * ({@code -D}, {@code -conf}, ...) are parsed before {@link #run(String[])}.
	 *
	 * @param args {@code <input path> <output path>} plus generic Hadoop options
	 * @throws Exception if job submission fails
	 */
	public static void main(String[] args) throws Exception {
		int res = ToolRunner.run(new Configuration(), new Index(), args);
		System.exit(res);
	}

	/**
	 * Configures and submits the inverted-index job, blocking until it completes.
	 *
	 * @param args {@code args[0]} = input path, {@code args[1]} = output path
	 * @return 0 on success, 2 when required arguments are missing
	 * @throws Exception if the job fails
	 */
	@Override
	public int run(String[] args) throws Exception {
		// Fail fast with a usage message rather than an ArrayIndexOutOfBoundsException.
		if (args.length < 2) {
			System.err.println("Usage: Index <input path> <output path>");
			ToolRunner.printGenericCommandUsage(System.err);
			return 2;
		}

		JobConf conf = new JobConf(Index.class);
		conf.setJobName("InvertedIndex");

		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(Text.class);

		conf.setReducerClass(IndexReducer.class);
		conf.setMapperClass(IndexMapper.class);
		conf.setPartitionerClass(IndexPartitioner.class);

		conf.setInputFormat(StreamInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);

		// Hadoop Streaming's XML record reader delivers each <DOC>...</DOC> span
		// as a single map input record, so the mapper sees whole documents.
		conf.set("stream.recordreader.class", "org.apache.hadoop.streaming.StreamXmlRecordReader");
		conf.set("stream.recordreader.begin", "<DOC>");
		conf.set("stream.recordreader.end", "</DOC>");

		// One reduce task per index shard; overridable via -D index.num.shards=N.
		conf.setNumReduceTasks(conf.getInt(NUM_SHARDS_PROPERTY, DEFAULT_NUM_SHARDS));

		FileInputFormat.setInputPaths(conf, new Path(args[0]));
		FileOutputFormat.setOutputPath(conf, new Path(args[1]));

		JobClient.runJob(conf);

		return 0;
	}
}
