package com.qingguo.MapReduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;

public class WC {
    /**
     * A minimal Hadoop MapReduce word-count job.
     *
     * <p>Input lines are split on {@code ","}; each token is counted across the
     * whole input. Consists of a mapper ({@link Mymap}), a reducer
     * ({@link Myreduce}) — also reused as a combiner — and a {@code main}
     * driver that wires the job together.
     *
     * <p>Usage: {@code WC <input path> <output path>} (both HDFS paths; the
     * output path must not already exist).
     */

    /**
     * Mapper: for every input record, splits the line on {@code ","} and emits
     * {@code (token, 1)}. The input key is the byte offset of the line within
     * the split (provided by the default TextInputFormat line reader).
     */
    public static class Mymap
            extends Mapper<LongWritable, Text, Text, LongWritable> {

        // Reused output objects: context.write() serializes them immediately,
        // so reusing avoids allocating two objects per token (Hadoop idiom).
        private final Text outKey = new Text();
        private static final LongWritable ONE = new LongWritable(1L);

        /**
         * Called once per input record.
         *
         * @param offset  byte offset of this line within the input split
         * @param line    the raw line of text
         * @param context job context used to emit intermediate (key, 1) pairs
         */
        @Override
        protected void map(LongWritable offset, Text line, Context context)
                throws IOException, InterruptedException {
            for (String token : line.toString().split(",")) {
                outKey.set(token);
                context.write(outKey, ONE);
            }
        }
    }

    /**
     * Reducer: sums the counts for each distinct token and emits
     * {@code (token, totalCount)}. Summation is associative and commutative,
     * so this class is also safe to use as a combiner.
     */
    public static class Myreduce
            extends Reducer<Text, LongWritable, Text, LongWritable> {

        // Reused output value, same rationale as in the mapper.
        private final LongWritable result = new LongWritable();

        /**
         * Called once per distinct key with all of its grouped values.
         *
         * @param key     the token
         * @param counts  all partial counts emitted for this token
         * @param context job context used to emit the final (token, sum) pair
         */
        @Override
        protected void reduce(Text key, Iterable<LongWritable> counts, Context context)
                throws IOException, InterruptedException {
            long sum = 0L;
            for (LongWritable count : counts) {
                sum += count.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    /**
     * Job driver: configures and submits the word-count job, then exits with
     * status 0 on success or 1 on failure.
     *
     * @param args {@code args[0]} = input path, {@code args[1]} = output path
     */
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: WC <input path> <output path>");
            System.exit(2);
        }
        // Load Hadoop configuration (core-site.xml etc. from the classpath).
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, WC.class.getName());
        // Lets Hadoop locate the jar containing this class on the cluster.
        job.setJarByClass(WC.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setMapperClass(Mymap.class);
        // Combiner = reducer: summing is associative/commutative, so running it
        // map-side preserves the final output while shrinking shuffle traffic.
        job.setCombinerClass(Myreduce.class);
        // Map output (key, value) serialization types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setReducerClass(Myreduce.class);
        // Final job output (key, value) serialization types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Bug fix: propagate job success/failure as the process exit code
        // (the original discarded waitForCompletion's boolean result).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
