package com.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.net.URI;

// Word-frequency counter (classic WordCount job).
// Extends Configured: inherits Hadoop configuration handling.
// Implements Tool: lets ToolRunner parse generic options and run the job.
public class WordCount extends Configured implements Tool {

    /**
     * Mapper: splits each comma-separated input line into words and emits
     * (word, 1) for every token.
     *
     * Input  (K1, V1): (byte offset of the line, line text), e.g.
     *   0  -> "hello,world,hello,hi"
     *   20 -> "hello,apple,hello"
     * Output (K2, V2): (word, 1) for each word on the line.
     */
    static class WordCountMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Reuse writable instances across map() calls instead of allocating
        // a new Text/IntWritable per record — standard Hadoop idiom that
        // avoids needless GC pressure; emitted output is identical.
        private static final IntWritable ONE = new IntWritable(1);
        private final Text outKey = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // e.g. "hello,world,hello,hi" -> ["hello","world","hello","hi"]
            String[] words = value.toString().split(",");
            for (String word : words) {
                outKey.set(word);
                context.write(outKey, ONE); // emit (word, 1)
            }
        }
    }

    /**
     * Reducer: sums the per-word counts grouped by the shuffle phase.
     *
     * Input  (K2, [V2]): e.g. ("hello", [1,1,1]), ("world", [1]), ("hi", [1])
     * Output (K3, V3):   e.g. ("hello", 3), ("world", 1), ("hi", 1)
     */
    static class WordCountReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        // Reused output value (see mapper comment).
        private final IntWritable result = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    /**
     * Configures and submits the word-count job.
     *
     * Paths are supplied through generic options parsed by ToolRunner:
     *   yarn jar <jar> <main-class> -D input=<in-path> -D output=<out-path>
     *
     * @return 0 on success, 1 on failure (standard process exit convention)
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        // Fail fast with a usage message instead of an NPE when the required
        // -D properties are missing.
        String inData = conf.get("input");
        String outData = conf.get("output");
        if (inData == null || outData == null) {
            System.err.println(
                    "Usage: WordCount -D input=<input path> -D output=<output path>");
            return 1;
        }
        Path input = new Path(inData);
        Path output = new Path(outData);

        FileSystem fs = FileSystem.get(
                new URI("hdfs://192.168.10.11:9000"), conf);
        // The output path must not exist; delete it recursively if it does.
        if (fs.exists(output)) {
            fs.delete(output, true);
        }

        Job job = Job.getInstance(conf);
        job.setJobName("word");
        job.setJarByClass(this.getClass());

        // Mapper and its output (K2, V2) types.
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Summing is associative and commutative, so the reducer doubles as a
        // combiner — this cuts shuffle traffic without changing the result.
        job.setCombinerClass(WordCountReducer.class);

        // Reducer and the final output (K3, V3) types.
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Bind input/output formats and paths.
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, input);
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, output);

        // Block until the job completes; map success/failure to 0/1.
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options (-D key=value) into the
        // Configuration before invoking run(); exit with its status code.
        System.exit(ToolRunner.run(new WordCount(), args));
    }
}
