package com.huawei.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;


// yarn jar /root/MRWordCOunt-1.0-SNAPSHOT.jar com.huawei.mapreduce.MRWordCount /user/FinalExam/words.txt /user/mr_output
public class MRWordCount {

    /**
     * Mapper that tokenizes each input line and emits one {@code (word, 1)}
     * pair per token.
     *
     * <p>Input:  {@code (byte offset of line, line text)} — the default
     * {@code TextInputFormat} key/value pair.<br>
     * Output: {@code (word, 1)} as {@code (Text, LongWritable)}.
     */
    public static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

        // Reuse writable instances across map() calls instead of allocating
        // per token — the framework serializes the pair on write(), so reuse
        // is safe and is the conventional Hadoop idiom.
        private final Text outKey = new Text();
        private static final LongWritable ONE = new LongWritable(1);

        /**
         * Splits the line on runs of whitespace and emits {@code (word, 1)}
         * for every non-empty token.
         *
         * <p>Fix over the original: splitting on a single space ({@code " "})
         * produced empty tokens for consecutive spaces and did not split on
         * tabs, so empty strings were counted as words. Splitting on
         * {@code \s+} and skipping blanks avoids both problems.
         *
         * @param key     byte offset of the line within the input split (unused)
         * @param value   one line of input text
         * @param context sink for the intermediate (word, 1) pairs
         */
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            for (String word : value.toString().split("\\s+")) {
                // A line with leading whitespace yields one empty first token.
                if (!word.isEmpty()) {
                    outKey.set(word);
                    context.write(outKey, ONE);
                }
            }
        }
    }

    /**
     * Reducer that totals the per-word counts produced by {@code MyMapper}
     * (possibly pre-aggregated by a combiner) and writes one final
     * {@code (word, total)} pair per word.
     *
     * <p>Input:  {@code (word, [count, count, ...])} as
     * {@code (Text, Iterable<LongWritable>)}.<br>
     * Output: {@code (word, total)} as {@code (Text, LongWritable)}.
     */
    public static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

        /**
         * Sums every count received for {@code key} and emits a single
         * {@code (word, total)} pair.
         *
         * @param key     the word being aggregated
         * @param values  all counts shuffled to this reducer for {@code key}
         * @param context sink for the final (word, total) pair
         */
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            long total = 0L;
            java.util.Iterator<LongWritable> it = values.iterator();
            while (it.hasNext()) {
                total += it.next().get();
            }
            context.write(key, new LongWritable(total));
        }
    }


    /**
     * Configures and submits the word-count job, then exits with the job's
     * success status (0 = success, 1 = job failed, 2 = bad arguments).
     *
     * <p>Usage: {@code MRWordCount <input path> <output path>}
     *
     * @param args args[0] = HDFS input path, args[1] = HDFS output path
     *             (must not already exist — FileOutputFormat refuses to
     *             overwrite an existing directory)
     * @throws Exception if job setup or submission fails
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an opaque
        // ArrayIndexOutOfBoundsException when paths are missing.
        if (args.length < 2) {
            System.err.println("Usage: MRWordCount <input path> <output path>");
            System.exit(2);
        }
        System.out.println(args[0]);
        System.out.println(args[1]);

        // Configuration describes the map-reduce work to the Hadoop framework.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, MRWordCount.class.getSimpleName());
        // Lets the cluster locate the jar containing this job's classes.
        job.setJarByClass(MRWordCount.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Reusing the reducer as a combiner is safe here because summing is
        // associative and commutative, and the reducer's input and output
        // types match (Text, LongWritable). It cuts shuffle traffic.
        job.setCombinerClass(MyReducer.class);

        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Block until completion; propagate success/failure via exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
