package com.example.hadoopdemo1.main;

import com.example.hadoopdemo1.mapper.WordCountMapper;
import com.example.hadoopdemo1.reduce.WordCountReduce;
import com.example.hadoopdemo1.sort.MyStringComparator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Job;

/**
 * Driver for a word-count MapReduce job: connects to HDFS, wires together the
 * mapper, custom sort comparator, and reducer, and submits the job.
 * Minimal sketches of the referenced classes follow this class for reference.
 *
 * Typical invocation (the jar name here is illustrative):
 *   hadoop jar hadoopdemo1.jar com.example.hadoopdemo1.main.WordCount <input> <output>
 *
 * @author cxx
 * @create 2020-02-09 10:54
 **/
public class WordCount {

    public static void main(String[] args) throws Exception {
        // Specify the Hadoop user to run as when submitting from a remote client.
        System.setProperty("HADOOP_USER_NAME", "root");
        Configuration conf = new Configuration();
        // Point the client at the HDFS NameNode (host "bigdata", port 9000).
        conf.set("fs.defaultFS", "hdfs://bigdata:9000");
        Job job = Job.getInstance(conf, "word count");

        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordCountMapper.class);

        // Replace the default shuffle-phase key ordering with a custom comparator.
        job.setSortComparatorClass(MyStringComparator.class);

        // The reducer could double as a combiner, since summing counts is
        // associative and commutative; it is left disabled here.
//        job.setCombinerClass(WordCountReduce.class);
        job.setReducerClass(WordCountReduce.class);

        // Map output types default to the job-level output types set here.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Input and output paths come from the command line; the output
        // directory must not already exist, or the job will fail.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Block until the job completes; exit 0 on success, 1 on failure.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
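
/*
 * For reference only: minimal sketches of the three classes this driver wires
 * together. The real WordCountMapper, WordCountReduce, and MyStringComparator
 * live in the packages imported above and may differ; these sketches assume
 * whitespace tokenization, a summing reducer, and a comparator that reverses
 * the default ascending Text key order.
 */
class WordCountMapperSketch
        extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, LongWritable> {

    private static final LongWritable ONE = new LongWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws java.io.IOException, InterruptedException {
        // Emit (token, 1) for every whitespace-separated token in the line.
        java.util.StringTokenizer tokens = new java.util.StringTokenizer(value.toString());
        while (tokens.hasMoreTokens()) {
            word.set(tokens.nextToken());
            context.write(word, ONE);
        }
    }
}

class WordCountReduceSketch
        extends org.apache.hadoop.mapreduce.Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws java.io.IOException, InterruptedException {
        // Sum the partial counts emitted for this word by all mappers.
        long sum = 0;
        for (LongWritable value : values) {
            sum += value.get();
        }
        context.write(key, new LongWritable(sum));
    }
}

class MyStringComparatorSketch extends org.apache.hadoop.io.WritableComparator {

    protected MyStringComparatorSketch() {
        super(Text.class, true); // instantiate keys so compare() receives Text objects
    }

    @Override
    public int compare(org.apache.hadoop.io.WritableComparable a,
                       org.apache.hadoop.io.WritableComparable b) {
        // Negate the natural comparison to sort keys in descending order.
        return -super.compare(a, b);
    }
}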
