package com.beifeng.hadoop.hdfs;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;


//Mapper class
//Reducer class
//Driver class

/**
 * Classic word-count MapReduce job.
 *
 * Data flow: input -> map() -> shuffle (with combiner) -> reduce() -> output
 *
 * @author zh
 */


public class ModuleMapReduce extends Configured implements Tool {

	/**
	 * Mapper: splits each input line into whitespace-separated tokens and
	 * emits {@code <word, 1>} for every token.
	 */
	public static class ModuleMap extends Mapper<LongWritable, Text, Text, IntWritable> {

		// Reused across map() calls to avoid allocating a Text per record.
		private final Text mapoutkey = new Text();
		// Constant "1" value shared by every emitted pair.
		private static final IntWritable mapoutpuvalue = new IntWritable(1);

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			// key is the byte offset of the line within the split;
			// value is the line text, e.g. <0, "hadoop hive">.
			StringTokenizer stringTokenizer = new StringTokenizer(value.toString());
			while (stringTokenizer.hasMoreTokens()) {
				mapoutkey.set(stringTokenizer.nextToken());
				context.write(mapoutkey, mapoutpuvalue);
			}
		}
	}

	/**
	 * Reducer: sums the per-word counts and emits {@code <word, total>}.
	 */
	public static class ModuleReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

		// Reused output value to avoid per-key allocation.
		private final IntWritable result = new IntWritable();

		@Override
		protected void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable value : values) {
				sum += value.get();
			}
			result.set(sum);
			context.write(key, result);
		}
	}

	/**
	 * Combiner: map-side local aggregation to shrink shuffle traffic.
	 * Word-count summing is associative and commutative, so the reducer
	 * logic is reused verbatim instead of being duplicated.
	 */
	public static class ModuleCombiner extends ModuleReduce {
		// Intentionally empty: inherits the summing reduce() from ModuleReduce.
	}

	/**
	 * Configures and submits the word-count job.
	 *
	 * @param args {@code args[0]} = input path, {@code args[1]} = output path
	 * @return 0 on success, 1 on failure or bad usage
	 * @throws Exception if job setup or submission fails
	 */
	public int run(String[] args) throws Exception {
		// Guard against the original ArrayIndexOutOfBoundsException when
		// fewer than two arguments are supplied.
		if (args.length < 2) {
			System.err.println("Usage: " + this.getClass().getSimpleName() + " <input> <output>");
			return 1;
		}

		// 1. Hadoop configuration injected by ToolRunner.
		Configuration conf = this.getConf();

		// 2. Create the job.
		Job job = Job.getInstance(conf, this.getClass().getSimpleName());
		// Required so the cluster can locate the jar containing this class.
		job.setJarByClass(ModuleMapReduce.class);

		// 3.1 Input path.
		FileInputFormat.setInputPaths(job, new Path(args[0]));

		// 3.2 Map phase.
		job.setMapperClass(ModuleMap.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);

		// ===========shuffle=========
		job.setCombinerClass(ModuleCombiner.class);
		// ===========shuffle=========

		// 3.3 Reduce phase.
		job.setReducerClass(ModuleReduce.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		//job.setNumReduceTasks(2);

		// 3.4 Output path: delete it first, because the job fails fast if
		// the output directory already exists.
		Path outpath = new Path(args[1]);
		FileSystem fsh = outpath.getFileSystem(conf);
		if (fsh.exists(outpath)) {
			fsh.delete(outpath, true);
		}
		FileOutputFormat.setOutputPath(job, outpath);

		// 4. Submit and wait; map success/failure to a process exit code.
		boolean isSuccess = job.waitForCompletion(true);
		return isSuccess ? 0 : 1;
	}

	public static void main(String[] args) throws Exception {
		// BUG FIX: the original unconditionally overwrote any command-line
		// arguments with hard-coded HDFS paths and discarded the job's exit
		// code. Fall back to the demo defaults only when no paths are given,
		// and propagate the exit status to the shell.
		if (args.length < 2) {
			args = new String[] {
					"hdfs://bigdata01:8020/file.txt",
					"hdfs://bigdata01:8020/output_new"
			};
		}
		Configuration configuration = new Configuration();
		int exitCode = ToolRunner.run(configuration, new ModuleMapReduce(), args);
		System.exit(exitCode);
	}
}
