package com.ww.hadoop.mr.wc;

import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
@Slf4j
public class MyWordCountLocal {

    /**
     * WordCount driver that runs the job in Hadoop's local (in-process) mode.
     *
     * <p>Invocation shape is {@code hadoop jar ooxx.jar MainClass [genericOptions]
     * [commandOptions]}, e.g. {@code hadoop jar ooxx.jar ooxx -D k=v inpath outpath}.
     * Args split into two families (genericOptions vs. commandOptions); parsing them
     * yourself adds complexity, so this driver hard-codes the input/output paths
     * instead of reading them from {@code args}.
     *
     * @param args unused — input/output paths are hard-coded below
     * @throws Exception if job configuration, HDFS access, or submission fails
     */
    public static void main(String[] args) throws Exception {

        log.info("开始执行");

        // Impersonate root when talking to HDFS from a dev machine.
        System.setProperty("HADOOP_USER_NAME", "root");

        Configuration conf = new Configuration(true);
        // Uncomment when submitting from a Windows client to a Linux cluster
        // (tells the framework this is a cross-platform submission):
        //conf.set("mapreduce.app-submission.cross-platform","true");
        // Force local mode: run map/reduce in this JVM instead of on YARN.
        conf.set("mapreduce.framework.name", "local");

        Job job = Job.getInstance(conf);
        // Alternative to setJarByClass when submitting a pre-built jar:
        //job.setJar("D:\\me\\study\\code\\idea\\try-hadoop\\target\\try-hadoop-1.0-SNAPSHOT.jar");
        job.setJarByClass(MyWordCountLocal.class);
        job.setJobName("wc01");

        Path infile = new Path("/data/wc/input");
        TextInputFormat.addInputPath(job, infile);

        // Remove a pre-existing output directory so the job doesn't fail
        // FileOutputFormat's "output directory already exists" check.
        // Fetch the FileSystem once and reuse it (was fetched twice before).
        Path outfile = new Path("/data/wc/output1");
        FileSystem fs = outfile.getFileSystem(conf);
        if (fs.exists(outfile)) {
            fs.delete(outfile, true);
        }
        TextOutputFormat.setOutputPath(job, outfile);

        job.setMapperClass(MyMapper.class);
        // Map output types must be declared explicitly when they differ from
        // the job's (default) output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(MyReducer.class);

//        job.setNumReduceTasks(2);
        // Submit the job, then poll for progress until the job is complete.
        // Propagate success/failure via the process exit code so callers
        // (shell scripts, schedulers) can detect a failed job.
        boolean succeeded = job.waitForCompletion(true);
        System.exit(succeeded ? 0 : 1);
    }

}
