package cn.chen.hd.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

public class TotalSort {
	/**
	 * Driver for a totally ordered sort: samples the input to build a partition
	 * file, then runs a map-reduce job whose {@link TotalOrderPartitioner}
	 * routes keys so every reducer's output is globally ordered.
	 *
	 * <p>Usage: {@code TotalSort <input> <output> <partitionFile> [reduceNumber]}
	 *
	 * @param args input path, output path, partition-file path, and an optional
	 *             reducer count (default 1; must be &gt;= 1 for total ordering)
	 * @throws Exception on job-configuration, sampling, or HDFS failures
	 */
	public static void main(String[] args) throws Exception {
		if (args.length < 3) {
			System.err.println("Usage: TotalSort <input> <output> <partitionFile> [reduceNumber]");
			System.exit(2);
		}
		Path inputPath = new Path(args[0]);
		Path outputPath = new Path(args[1]);
		// Partition file read by TotalOrderPartitioner to decide key ranges.
		Path partitionFile = new Path(args[2]);
		// Total ordering requires at least one reducer; 0 reducers would bypass
		// the partitioner entirely (the original hard-coded 0 here).
		int reduceNumber = args.length > 3 ? Integer.parseInt(args[3]) : 1;

		// RandomSampler(freq, numSamples, maxSplitsSampled):
		// freq = probability each record is selected, numSamples = total samples
		// to keep, maxSplitsSampled = maximum number of InputSplits to read.
		InputSampler.RandomSampler<Text, Text> sampler =
				new InputSampler.RandomSampler<Text, Text>(0.1, 10000, 10);
		Configuration conf = new Configuration();
		// Must be set on the Configuration *before* the Job copies it.
		TotalOrderPartitioner.setPartitionFile(conf, partitionFile);

		// Job.getInstance replaces the deprecated new Job(conf) constructor.
		Job job = Job.getInstance(conf, "TotalSort");
		job.setJarByClass(TotalSort.class);
		job.setInputFormatClass(KeyValueTextInputFormat.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(Text.class);
		job.setNumReduceTasks(reduceNumber);

		job.setPartitionerClass(TotalOrderPartitioner.class);
		FileInputFormat.setInputPaths(job, inputPath);
		FileOutputFormat.setOutputPath(job, outputPath);
		// Delete stale output so the job does not abort with "output dir exists".
		outputPath.getFileSystem(conf).delete(outputPath, true);

		// Sample the input and write the partition file before launching the job.
		InputSampler.writePartitionFile(job, sampler);
		// Propagate success/failure as the process exit code (the original only
		// printed it and always exited 0).
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}