package parallel;
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.Job;

import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Generates sampled split points with a random sampler, launches the
 * total-order sort job, and waits for it to finish.
 * <p>
 * To run the program:
 * <b>bin/hadoop jar &lt;jar-file&gt; parallel.ParallelEventSort in-dir out-dir</b>
 */
public class ParallelEventSort extends Configured implements Tool {
	private static final Log LOG = LogFactory.getLog(ParallelEventSort.class);

	/**
	 * Configures and runs the total-order sort job: samples the input to build
	 * the partition file, registers it with the {@link TotalOrderPartitioner},
	 * ships it to the tasks via the distributed cache, and blocks until the
	 * job completes.
	 *
	 * @param args {@code args[0]} = input directory, {@code args[1]} = output directory
	 * @return 0 on success, 1 on job failure, 2 on bad usage
	 * @throws Exception if job setup, sampling, or submission fails
	 */
	public int run(String[] args) throws Exception {
		if (args == null || args.length < 2) {
			System.err.println("Usage: ParallelEventSort <input-dir> <output-dir>");
			return 2;
		}
		LOG.info("starting");
		Configuration conf = getConf();
		// Comma is both the separator the KeyValueLineRecordReader splits
		// input lines on and the separator TextOutputFormat emits.
		conf.set("mapred.textoutputformat.separator", ",");
		conf.set(KeyValueLineRecordReader.KEY_VALUE_SEPERATOR, ",");
		Job job = new Job(conf, "parallelEventSort");
		job.setJarByClass(ParallelEventSort.class);
		job.setOutputKeyClass(LongWritable.class);
		job.setOutputValueClass(Text.class);
		job.setInputFormatClass(EventInputFormat.class);
		job.setOutputFormatClass(TextOutputFormat.class);
		job.setPartitionerClass(TotalOrderPartitioner.class);
		EventInputFormat.setInputPaths(job, new Path(args[0]));
		TextOutputFormat.setOutputPath(job, new Path(args[1]));
		// Use a random sampler (10% probability, up to 10000 samples,
		// at most 10 splits read) to choose the partition split points.
		InputSampler.Sampler<LongWritable, Text> sampler =
				new InputSampler.RandomSampler<LongWritable, Text>(0.1, 10000, 10);
		Path input = new Path(args[0]);
		input = input.makeQualified(input.getFileSystem(conf));
		Path partitionFile = new Path(input, "_partitions");
		// BUG FIX: new Job(conf) takes a COPY of the Configuration, so any
		// setting made on the local 'conf' after this point is silently
		// ignored by the submitted job. All post-construction settings
		// (partition file, cache file, symlink, replication) must go on the
		// job's own Configuration.
		Configuration jobConf = job.getConfiguration();
		TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile);
		InputSampler.writePartitionFile(job, sampler);
		// Ship the partition file to every task through the distributed
		// cache, symlinked as "_partitions" in each task's working directory
		// (the fragment after '#' names the symlink).
		URI partitionUri = new URI(partitionFile.toString() + "#_partitions");
		DistributedCache.addCacheFile(partitionUri, jobConf);
		DistributedCache.createSymlink(jobConf);
		jobConf.setInt("dfs.replication", 1);
		int result = job.waitForCompletion(true) ? 0 : 1;
		LOG.info("done");
		return result;
	}

	/**
	 * Command-line entry point; delegates to {@link ToolRunner} so generic
	 * Hadoop options (-D, -conf, ...) are parsed before {@link #run}.
	 *
	 * @param args command-line arguments: input directory, output directory
	 * @throws Exception if the tool fails to run
	 */
	public static void main(String[] args) throws Exception {
		int res = ToolRunner.run(new Configuration(), new ParallelEventSort(), args);
		System.exit(res);
	}

}
