package demo.mapreduce.java.demos;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.lang.reflect.Array;
import java.net.URI;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;

/**
 * @Author: Qiao Hang
 * @CreateDate: 2021/7/28 上午11:37
 * @UpdateDate:
 * @Description:
 */
public class WordCountDemo2 {

    /** Delimiter used to split each input line into words. */
    private static final String WORD_SPLIT_TOKEN = " ";

    /**
     * Map stage: consumes the input one line at a time and emits a
     * {@code (word, 1)} pair for every token in the line.
     *
     * <p>Type parameters {@code <LongWritable, Text, Text, IntWritable>}:
     * <ul>
     *   <li>{@code LongWritable} — byte offset of the current line within the input</li>
     *   <li>{@code Text} — the text of the current line</li>
     *   <li>{@code Text} — output key (the word)</li>
     *   <li>{@code IntWritable} — output value (always 1 at this stage)</li>
     * </ul>
     *
     * <p>NOTE: there may be several map-like stages chained together; each one
     * operates on the output of the previous one — analogous to "operators" in
     * streaming frameworks, of which there can be many.
     */
    public static class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Reused across map() calls: Hadoop serializes the key/value on
        // context.write(), so reusing Writables avoids one allocation per token
        // (standard Hadoop idiom).
        private static final IntWritable ONE = new IntWritable(1);
        private final Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer tokenizer = new StringTokenizer(value.toString(), WORD_SPLIT_TOKEN);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, ONE);
            }
        }
    }

    /**
     * Reduce stage: receives all the 1-counts emitted for a given word and
     * sums them, emitting {@code (word, totalCount)}.
     *
     * <p>Type parameters {@code <Text, IntWritable, Text, IntWritable>}:
     * <ul>
     *   <li>{@code Text} — input key (the word, from the map stage)</li>
     *   <li>{@code IntWritable} — input value (one count per occurrence)</li>
     *   <li>{@code Text} — output key (the word)</li>
     *   <li>{@code IntWritable} — output value (total occurrences)</li>
     * </ul>
     *
     * <p>NOTE: there is a single reduce stage; it aggregates everything the
     * preceding map stages produced — analogous to an "action" in streaming
     * frameworks, of which there is only one.
     */
    public static class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        // Reused across reduce() calls — same allocation-avoidance idiom as the mapper.
        private final IntWritable result = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int countSum = 0;
            for (IntWritable count : values) {
                countSum += count.get();
            }
            result.set(countSum);
            context.write(key, result);
        }
    }

    /**
     * Configures and submits the word-count job: wires up the mapper/reducer,
     * declares their output types, registers the input paths, clears any stale
     * output directory, then blocks until the job completes.
     *
     * <p>Exits with status 0 on success and 1 on job failure or any
     * setup/submission error.
     */
    public static void main(String[] args) {
        try {
            List<String> inputPaths = Arrays.asList(
                    "/two.txt",
                    "/copy/oneDir/copy_eee.txt",
                    "/move/oneDir/move_ddd.txt");
            String outputDir = "/output/wordcount";

            Configuration configuration = new Configuration();
            configuration.set("fs.defaultFS", "hdfs://icasue111:9090");
            FileSystem fileSystem = FileSystem.get(configuration);

            // Input paths must already exist. The output path must NOT exist —
            // the job is what creates it, and submission fails if it is already
            // there — so remove any leftover result from a previous run.
            Path outputPath = new Path(outputDir);
            if (fileSystem.exists(outputPath)) {
                fileSystem.delete(outputPath, true);
            }

            // Build the job and point it at the class whose jar should be shipped
            // to the cluster.
            Job job = Job.getInstance(configuration, "WordCountDemo");
            job.setJarByClass(WordCountDemo2.class);

            // Stage classes (mapper / reducer).
            job.setMapperClass(WordCountMapper.class);
            job.setReducerClass(WordCountReduce.class);
            // Mapper output types.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            // Reducer (final) output types.
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);

            // Job input (text files) and output location.
            for (String oneInputPath : inputPaths) {
                FileInputFormat.addInputPath(job, new Path(oneInputPath));
            }
            FileOutputFormat.setOutputPath(job, outputPath);

            // Submit and wait for completion; propagate the job's success/failure
            // as the process exit code.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
            // Previously the method fell through after printing, exiting 0 on
            // failure; a setup/submission error must be reported as a failure.
            System.exit(1);
        }
    }

}
