package org.hadoop.ch;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

/*
 * MapReduce 初级案例 wordCount程序
 */
/*
 * Basic MapReduce example: a word-count program.
 */
public class MyWordCount {

	/**
	 * Mapper: splits each input line into whitespace-separated tokens and
	 * emits (word, 1) for every token.
	 *
	 * Type parameters: KEYIN = LongWritable (byte offset of the line),
	 * VALUEIN = Text (the line itself), KEYOUT = Text (a single word),
	 * VALUEOUT = IntWritable (the constant count 1).
	 */
	static class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
		// Reused singletons to avoid allocating new Writables per record.
		private static final IntWritable ONE = new IntWritable(1);
		private final Text word = new Text();

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			// Split the input line on whitespace and emit each token with count 1.
			StringTokenizer tokenizer = new StringTokenizer(value.toString());
			while (tokenizer.hasMoreTokens()) {
				word.set(tokenizer.nextToken());
				context.write(word, ONE);
			}
		}
	}

	/**
	 * Reducer: sums all the counts emitted for a word and writes
	 * (word, totalCount).
	 */
	static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		// Reused output value to avoid allocating per key.
		private final IntWritable result = new IntWritable();

		@Override
		protected void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable value : values) {
				sum += value.get();
			}
			result.set(sum);
			context.write(key, result);
		}
	}

	/**
	 * Job driver: configures and submits the word-count job.
	 *
	 * @param args args[0] = input path, args[1] = output path
	 * @throws Exception if job configuration or submission fails
	 */
	public static void main(String[] args) throws Exception {
		// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 2) {
			System.err.println("Usage: MyWordCount <input path> <output path>");
			System.exit(2);
		}

		// Load the Hadoop configuration (core-site.xml, etc.).
		Configuration conf = new Configuration();

		// Job.getInstance replaces the deprecated new Job(conf, name) constructor.
		Job job = Job.getInstance(conf, "wc");
		// Lets Hadoop locate the jar containing this job's classes.
		job.setJarByClass(MyWordCount.class);

		job.setMapperClass(MyMapper.class);
		// Running the reducer as a combiner is safe here (summing is commutative
		// and associative) and reduces shuffle traffic.
		job.setCombinerClass(MyReducer.class);
		job.setReducerClass(MyReducer.class);

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		// NOTE: must be the new-API (mapreduce) TextInputFormat, not the
		// org.apache.hadoop.mapred one imported at the top of this file —
		// hence the fully-qualified name.
		job.setInputFormatClass(org.apache.hadoop.mapreduce.lib.input.TextInputFormat.class);
		job.setOutputFormatClass(TextOutputFormat.class);

		// Input and output directories come from the command line.
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// Submit the job and block until it finishes; 'true' prints progress
		// to the client console.
		boolean success = job.waitForCompletion(true);
		System.exit(success ? 0 : 1);
	}
}
