package com.run.fjy.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.elasticsearch.hadoop.mr.EsOutputFormat;

import com.run.bcpimp.util.RunTool;
import com.run.fjy.io.TerminalWritable;


public class NewAddGlobal extends RunTool
{

	/**
	 * Driver for the "new add" global statistics MapReduce job.
	 *
	 * <p>Reads SequenceFile input, aggregates via {@code NewAddGlobalReducer},
	 * and declares two named outputs: a SequenceFile stream back to HDFS
	 * ("newaddhdfs") and an Elasticsearch stream ("newaddesdb") targeting
	 * index/type {@code statis_all/newadd}.
	 */
	public NewAddGlobal()
	{
	}

	/**
	 * Configures and runs the job.
	 *
	 * @param arg0 command-line arguments; consumed by {@code setInOutPaths}
	 *             for the input and output paths (count enforced via
	 *             {@link #getArgumentNumber()})
	 * @return 0 if the job completes successfully, 1 otherwise
	 * @throws Exception if job configuration, submission, or execution fails
	 */
	public int exec(String arg0[])
		throws Exception
	{
		Configuration conf = getConf();
		// Speculative execution is disabled: the Elasticsearch output is not
		// idempotent, so duplicate task attempts would double-write documents.
		conf.setBoolean("mapred.map.tasks.speculative.execution", false);
		conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
		conf.setInt("es.batch.size.entries", 8000);
		// NOTE(review): ES endpoint and write resource are hard-coded here;
		// consider externalizing them to configuration.
		conf.set("es.nodes", "192.168.17.30:9200");
		conf.set("es.resource.write", "statis_all/newadd");
		conf.set("cfgfile.bcpextract", "conf/bcpextract_newadd.xml");
		Job job = new Job(conf);
		job.setJarByClass(NewAddGlobal.class);
		// No mapper is set, so the default identity Mapper forwards records to
		// the reducer unchanged — presumably intentional; confirm no custom
		// mapper was expected here.
		job.setReducerClass(NewAddGlobalReducer.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(TerminalWritable.class);
		job.setInputFormatClass(SequenceFileInputFormat.class);
		setInOutPaths(job, arg0);
		MultipleOutputs.addNamedOutput(job, "newaddhdfs", SequenceFileOutputFormat.class, Text.class, TerminalWritable.class);
		MultipleOutputs.addNamedOutput(job, "newaddesdb", EsOutputFormat.class, Text.class, MapWritable.class);
		// BUG FIX: the original called System.exit(status) here, killing the
		// JVM before the method could return — the trailing "return 0" was
		// dead code and in-process callers never saw the real job status.
		// Return the status instead and let the caller decide to exit.
		return job.waitForCompletion(true) ? 0 : 1;
	}

	/**
	 * @return the number of command-line arguments this tool requires
	 *         (the input and output paths)
	 */
	protected int getArgumentNumber()
	{
		return 2;
	}
}