package com.hadoop.chainmr;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

/**
 * Driver for a chained MapReduce job. Execution order of the chain:
 * FilterMapper1 --> FilterMapper2 --> SumReducer3 --> FilterMapper4.
 *
 * Input records are expected as space-separated lines of the form
 * "{@code <name> <amount>}"; the pipeline filters by amount, sums the
 * amounts per name, then filters by name length.
 */
public class ChainMapReduce {

	// Relative paths on the default FileSystem; the output directory is
	// recreated by each run (see main).
	private static final String INPUTPATH = "input/chainmr/*";
	private static final String OUTPUTPATH = "output/chainmr";

	/**
	 * Configures and submits the chained job, then exits with the job's
	 * success status (0 on success, 1 on failure or setup error).
	 *
	 * @param args unused command-line arguments
	 */
	public static void main(String[] args) {
		try {
			Configuration conf = new Configuration();
			// Resolve the FileSystem that backs the output location.
			FileSystem fileSystem = FileSystem.get(new URI(OUTPUTPATH), conf);

			// Delete any previous output so the job can be re-run;
			// FileOutputFormat refuses to write into an existing directory.
			Path outputDir = new Path(OUTPUTPATH);
			if (fileSystem.exists(outputDir)) {
				fileSystem.delete(outputDir, true);
				System.out.println("delete outputpath:" + OUTPUTPATH);
			}

			Job job = Job.getInstance(conf, "chain mr");
			job.setJarByClass(ChainMapReduce.class);

			// Input location and format.
			FileInputFormat.addInputPath(job, new Path(INPUTPATH));
			job.setInputFormatClass(TextInputFormat.class);

			// Each chain element gets its own private Configuration
			// (new Configuration(false)) so per-stage settings cannot leak
			// between stages or into the job configuration.
			ChainMapper.addMapper(job, FilterMapper1.class, LongWritable.class, Text.class, Text.class,
					DoubleWritable.class, new Configuration(false));

			ChainMapper.addMapper(job, FilterMapper2.class, Text.class, DoubleWritable.class, Text.class,
					DoubleWritable.class, new Configuration(false));

			ChainReducer.setReducer(job, SumReducer3.class, Text.class, DoubleWritable.class, Text.class,
					DoubleWritable.class, new Configuration(false));
			// Note: mappers that run AFTER the reducer must be registered
			// through ChainReducer, not ChainMapper.
			ChainReducer.addMapper(job, FilterMapper4.class, Text.class, DoubleWritable.class, Text.class,
					DoubleWritable.class, new Configuration(false));

			// Key/value types emitted by the map phase of the chain.
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(DoubleWritable.class);

			// Partitioning and reducer parallelism.
			job.setPartitionerClass(HashPartitioner.class);
			job.setNumReduceTasks(1);

			// Final output key/value types.
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(DoubleWritable.class);

			// Output location and format.
			FileOutputFormat.setOutputPath(job, outputDir);
			job.setOutputFormatClass(TextOutputFormat.class);

			// Submit and block until completion; propagate success/failure
			// through the process exit code.
			System.exit(job.waitForCompletion(true) ? 0 : 1);
		} catch (Exception e) {
			// Job setup failed (bad URI, filesystem error, misconfiguration).
			// Report it and exit non-zero so callers/schedulers see the failure
			// instead of a silent exit code 0.
			e.printStackTrace();
			System.exit(1);
		}
	}

	/**
	 * First chain stage: parses "{@code <name> <amount>}" lines and drops
	 * records whose amount exceeds 10,000. Malformed lines (missing fields
	 * or a non-numeric amount) are skipped instead of crashing the task.
	 */
	public static class FilterMapper1 extends Mapper<LongWritable, Text, Text, DoubleWritable> {

		// Reused output key/value instances (standard Hadoop pattern to
		// avoid per-record allocation).
		private Text outKey = new Text();
		private DoubleWritable outValue = new DoubleWritable();

		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			String line = value.toString();
			if (line.length() == 0) {
				return;
			}
			// Fields are separated by a single space: name, then amount.
			String[] splits = line.split(" ");
			if (splits.length < 2) {
				return; // malformed record: amount field missing
			}
			double money;
			try {
				money = Double.parseDouble(splits[1].trim());
			} catch (NumberFormatException e) {
				return; // malformed record: non-numeric amount
			}
			// Keep only records with amount <= 10,000.
			if (money <= 10000) {
				outKey.set(splits[0]);
				outValue.set(money);
				context.write(outKey, outValue);
			}
		}// map
	}// FilterMapper1

	/**
	 * Second chain stage: keeps only records whose amount is strictly
	 * less than 100 (records with amount >= 100 are dropped).
	 */
	public static class FilterMapper2 extends Mapper<Text, DoubleWritable, Text, DoubleWritable> {

		@Override
		protected void map(Text key, DoubleWritable value, Context context) throws IOException, InterruptedException {
			if (value.get() < 100) {
				context.write(key, value);
			}
		}// map
	}// FilterMapper2

	/**
	 * Reduce stage: sums all amounts per key.
	 */
	public static class SumReducer3 extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {

		// Reused output value instance.
		private DoubleWritable outValue = new DoubleWritable();

		@Override
		protected void reduce(Text key, Iterable<DoubleWritable> values, Context context)
				throws IOException, InterruptedException {
			double sum = 0;
			for (DoubleWritable val : values) {
				sum += val.get();
			}
			outValue.set(sum);
			context.write(key, outValue);
		}
	}// SumReducer3

	/**
	 * Final chain stage (runs after the reducer): keeps only records whose
	 * key (name) is shorter than 8 characters; names of length >= 8 are
	 * dropped.
	 */
	public static class FilterMapper4 extends Mapper<Text, DoubleWritable, Text, DoubleWritable> {

		@Override
		protected void map(Text key, DoubleWritable value, Context context) throws IOException, InterruptedException {
			if (key.toString().length() < 8) {
				context.write(key, value);
			}
		}

	}// FilterMapper4
}
