package com.zc.hadoop.mapreduce4;

import java.io.IOException;
import java.util.StringTokenizer;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Word-count MapReduce job that counts occurrences per key and emits only the
 * top-K most frequent entries, ordered via the custom SortCountWritable key.
 */
public class SortWordCountMapReduce extends Configured implements Tool {

	/**
	 * Mapper: tokenizes each input line on whitespace and emits (word, 1)
	 * for every token.
	 */
	public static class SortWordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
		// Reused output key to avoid allocating a new Text per token.
		private final Text mapOutputKey = new Text();
		// Constant count of 1 attached to every emitted word.
		private final IntWritable mapOutputValue = new IntWritable(1);

		/**
		 * Splits one line of input into whitespace-delimited tokens and
		 * writes each token with a count of 1.
		 *
		 * @param key     byte offset of the line within the input split (unused)
		 * @param value   one line of input text
		 * @param context Hadoop context receiving the (word, 1) pairs
		 */
		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			// StringTokenizer splits on whitespace by default; adjust here
			// (e.g. split on a delimiter) if the input format differs.
			StringTokenizer tokenizer = new StringTokenizer(value.toString());
			while (tokenizer.hasMoreTokens()) {
				mapOutputKey.set(tokenizer.nextToken());
				context.write(mapOutputKey, mapOutputValue);
			}
		}
	}

	/**
	 * Reducer: sums the counts per word, buffers the pairs in an ordered set
	 * bounded at {@code TOP_K}, and emits the survivors in {@link #cleanup}.
	 * SortCountWritable already carries both the word and its count, so the
	 * output value slot is filled with NullWritable.
	 */
	public static class SortWordCountReduce extends Reducer<Text, IntWritable, SortCountWritable, NullWritable> {

		// Number of top entries to emit.
		private static final int TOP_K = 5;
		// Ordered by SortCountWritable's natural ordering (its compareTo).
		// NOTE(review): the eviction below assumes last() is the entry to drop
		// when the set exceeds TOP_K — confirm against SortCountWritable's
		// compareTo implementation.
		private final TreeSet<SortCountWritable> topSet = new TreeSet<SortCountWritable>();

		/**
		 * Sums all counts for one word and buffers the (word, sum) pair;
		 * nothing is written here — output happens once, in cleanup().
		 *
		 * @param key     the word
		 * @param values  partial counts emitted by the mappers
		 * @param context Hadoop context (unused until cleanup)
		 */
		@Override
		protected void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			// Total occurrences of this word across all mappers.
			int sum = 0;
			for (IntWritable value : values) {
				sum += value.get();
			}

			topSet.add(new SortCountWritable(key.toString(), sum));

			// Keep the buffer bounded: evict the last element once we exceed TOP_K.
			if (topSet.size() > TOP_K) {
				topSet.remove(topSet.last());
			}
		}

		/**
		 * Emits the buffered top-K entries after every key has been reduced.
		 */
		@Override
		protected void cleanup(Context context)
				throws IOException, InterruptedException {
			super.cleanup(context);

			for (SortCountWritable entry : topSet) {
				context.write(entry, NullWritable.get());
			}
		}
	}

	/**
	 * Configures and submits the job, blocking until it finishes.
	 *
	 * @param args [0] input path, [1] output path
	 * @return 0 on success, 1 on failure
	 */
	@Override
	public int run(String[] args) throws Exception {
		// Use the Configuration injected by ToolRunner (via setConf) instead of
		// creating a fresh one — a fresh instance would silently discard any
		// -D generic options passed on the command line.
		Configuration config = this.getConf();

		Job job = this.parseInputAndOutput(config, args, this);

		// Map stage: (offset, line) -> (word, 1).
		job.setMapperClass(SortWordCountMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);

		// Reduce stage: the composite key carries (word, count), value is empty.
		job.setReducerClass(SortWordCountReduce.class);
		job.setOutputKeyClass(SortCountWritable.class);
		job.setOutputValueClass(NullWritable.class);

		// Submit and wait; 'true' streams progress to the client console.
		boolean succeeded = job.waitForCompletion(true);
		return succeeded ? 0 : 1;
	}

	/**
	 * Creates the job and wires up its input and output paths.
	 *
	 * @param conf job configuration
	 * @param args [0] input directory, [1] output directory
	 * @param tool the tool whose simple class name becomes the job name
	 * @return the configured {@link Job}
	 * @throws IOException if the job cannot be created
	 * @throws IllegalArgumentException if fewer than two paths are supplied
	 */
	public Job parseInputAndOutput(Configuration conf, String[] args, Tool tool) throws IOException {
		// Fail fast with a usable message instead of an opaque
		// ArrayIndexOutOfBoundsException further down.
		if (args == null || args.length < 2) {
			throw new IllegalArgumentException("Usage: <input path> <output path>");
		}

		Job job = Job.getInstance(conf, tool.getClass().getSimpleName());

		// Ship the jar containing this class to the cluster nodes.
		job.setJarByClass(this.getClass());

		// Input location(s).
		FileInputFormat.addInputPath(job, new Path(args[0]));

		// Output location (must not already exist).
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		return job;
	}

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// ToolRunner parses generic options (-D, -files, ...) into conf and
		// then invokes run(); the returned int is the job's status code.
		int status = ToolRunner.run(conf, new SortWordCountMapReduce(), args);

		// Propagate the status to the shell: 0 on success, non-zero on failure.
		System.exit(status);
	}

}
