package basic;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Sougou1 {

	/**
	 * @param args
	 */
	public static void main(String[] args) throws Exception{
		if(args.length!=2){
			System.err.println("User path err!!");
			System.exit(-1);
		}
		Job job = new Job(new Configuration(),"sougou");
		job.setJarByClass(Sougou1.class);
		
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job,new Path(args[1]));
		
		job.setMapperClass(uidmap.class);
		job.setReducerClass(uidreduce.class);
		
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(NullWritable.class);
		
		job.waitForCompletion(true);
		
	}

	public static class uidmap extends
			Mapper<LongWritable, Text, Text, NullWritable> {
		protected void map(
				LongWritable key,
				Text value,
				org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, NullWritable>.Context context)
				throws java.io.IOException, InterruptedException {
			String[] lines = value.toString().split("\t");
			if (lines.length == 6 && lines[2].trim().contains("百度")) {
				String uid = lines[1].trim();
				context.write(new Text(uid), NullWritable.get());// map输出的中间结果是：《百度外卖》

			}
		};
	}

	// map产生的中间结果需要经过shuffle阶段处理。通过shuffle默认对key的操作来实现key的去重。去重之后的数据，交给reduce函数处理。
	public static class uidreduce extends Reducer<Text, NullWritable, Text, NullWritable> {
		Set<String> sets = new HashSet<String>();

		protected void reduce(
				Text k2,
				java.lang.Iterable<NullWritable> values,
				org.apache.hadoop.mapreduce.Reducer<Text, NullWritable, Text, NullWritable>.Context context)
				throws java.io.IOException, InterruptedException {
			// context.write(k2, NullWritable.get());
			sets.add(k2.toString());
		};

		protected void cleanup(
				org.apache.hadoop.mapreduce.Reducer<Text, NullWritable, Text, NullWritable>.Context context)
				throws java.io.IOException, InterruptedException {
			// 统计出。。。所有用户的数量
			// sets集合的大小。就是用戶的數量
			//所有reduce函数在处理完毕之后再执行cleanup函数
			context.write(new Text("the number of search 'baidu' users :"
					+ sets.size()), NullWritable.get());
		};
	}

}
