package com.fengye.hadoop.example;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * @author fengyexjtu@126.com
 */
public class WordCount {
    /**
     * Entry point. Delegates to {@link ToolRunner} so generic Hadoop options
     * (-D, -conf, -fs, ...) are parsed, and propagates the job's exit code to
     * the shell via System.exit — previously the return value was discarded,
     * so a failed job still exited with status 0.
     *
     * @param args command-line arguments forwarded to the Tool
     */
    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new WordCountRunner(), args));
    }
}

/**
 * Mapper type parameters &lt;Object, Text, Text, LongWritable&gt;:
 * Object: input key supplied by the input format
 * Text: input value (one line of text)
 * Text: output key (a single word)
 * LongWritable: output value (always 1 per occurrence)
 * The map output therefore consists of records like
 * &lt;hello,1&gt;
 * &lt;word,1&gt;
 * which the reducer later sums per word.
 */
class WordCountMapper extends Mapper<Object, Text, Text, LongWritable> {
    // Reusable output objects so we do not allocate per record.
    private final Text currentWord = new Text();
    private final LongWritable singleCount = new LongWritable(1);

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        super.setup(context);
        System.out.println("调用 WordCountMapper#setup");
    }

    /**
     * Tokenizes one line of input on whitespace and writes a (word, 1)
     * record for every token.
     *
     * @param key     input key supplied by the input format
     * @param value   one line of the input file
     * @param context sink for map output, consumed by the shuffle/reduce phase
     * @throws IOException          if the framework fails to write a record
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        // StringTokenizer's default delimiters split on any whitespace run.
        for (StringTokenizer tokens = new StringTokenizer(value.toString()); tokens.hasMoreTokens(); ) {
            currentWord.set(tokens.nextToken());
            context.write(currentWord, singleCount);
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        super.cleanup(context);
        System.out.println("调用WordCountMapper#cleanup");
    }
}

/**
 * Reducer type parameters &lt;Text, LongWritable, Text, LongWritable&gt;:
 * the first pair is the input (key, value) type, the second pair is the
 * output (key, value) type. (The original comment said IntWritable, but the
 * class actually emits LongWritable.)
 */
class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
    // Reusable output value object; overwritten before each write.
    private final LongWritable totalCount = new LongWritable(1);

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        super.setup(context);
        System.out.println("调用WordCountReduce#setup");
    }

    /**
     * Sums all partial counts emitted for one word and writes (word, total).
     *
     * @param key     the word
     * @param values  the partial counts produced by the mappers for this word
     * @param context sink for the final (word, total) output record
     * @throws IOException          if the framework fails to write a record
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        long total = 0L;
        for (LongWritable partial : values) {
            total += partial.get();
        }
        totalCount.set(total);
        context.write(key, totalCount);
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        super.cleanup(context);
        System.out.println("调用WordCountReduce#cleanup");
    }
}

/**
 * Configures and submits the word-count job. Input/output paths may be
 * supplied as the first and second command-line arguments; when absent,
 * the original hard-coded HDFS paths are used, so existing invocations
 * keep working unchanged.
 */
class WordCountRunner implements Tool {
    private static final String DEFAULT_INPUT = "/java/api/word.txt";
    private static final String DEFAULT_OUTPUT = "/java/api/count";

    private Configuration configuration = null;

    /**
     * Builds and runs the MapReduce job.
     *
     * @param args optional: args[0] = input path, args[1] = output path
     * @return 0 on job success, 1 on failure
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = this.getConf();
        System.out.println(conf.get("fs.defaultFS"));

        // Paths were previously hard-coded and CLI args ignored; now args
        // override the defaults while the no-arg behavior is unchanged.
        Path inputPath = new Path(args.length > 0 ? args[0] : DEFAULT_INPUT);
        Path outputPath = new Path(args.length > 1 ? args[1] : DEFAULT_OUTPUT);

        Job job = Job.getInstance(conf);
        // Without this, cluster submission fails with ClassNotFoundException
        // because the framework cannot locate the jar containing these classes.
        job.setJarByClass(WordCountRunner.class);

        //1. input
        FileInputFormat.addInputPath(job, inputPath);

        //2. map
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        //3. shuffle — summation is associative and commutative, so the reducer
        // can also run as a combiner to cut shuffle traffic; output is identical.
        job.setCombinerClass(WordCountReduce.class);

        //4. reduce
        job.setReducerClass(WordCountReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        //5. output — remove a stale output directory only if it exists,
        // otherwise FileOutputFormat would refuse to start the job.
        FileSystem fileSystem = FileSystem.get(conf);
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true);
        }
        FileOutputFormat.setOutputPath(job, outputPath);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    /**
     * Stores the configuration and applies the cluster connection settings.
     * Called by ToolRunner before {@link #run(String[])}.
     */
    @Override
    public void setConf(Configuration conf) {
        this.configuration = conf;
        this.configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        this.configuration.set("fs.defaultFS", "hdfs://192.168.1.2:9000");
        this.configuration.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    }

    @Override
    public Configuration getConf() {
        return this.configuration;
    }
}