package org.zjt.wordcnt.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver class for a WordCount MapReduce job: configures the cluster
 * connection, wires up the mapper/reducer, prepares input/output paths,
 * and submits the job.
 *
 * @author (unattributed)
 * @since 2017-05-23
 **/
public class WordJobStart {

    /**
     * Configures and submits the WordCount job, then blocks until it
     * completes. Exits with status 0 on success, 1 on job failure.
     *
     * @param args command-line arguments (unused; paths are hard-coded)
     * @throws Exception if cluster communication or job submission fails
     */
    public static void main(String[] args) throws Exception {

        // Hadoop cluster configuration: point at the HDFS NameNode and
        // the YARN ResourceManager on host "master".
        Configuration config = new Configuration();
        config.set("fs.defaultFS", "hdfs://master:9000");
        config.set("yarn.resourcemanager.hostname", "master");

        // Configure the WordCount job: mapper, reducer, and key/value types.
        Job job = Job.getInstance(config);
        job.setJarByClass(WordJobStart.class);
        job.setMapperClass(WordMapper.class);
        job.setReducerClass(WordReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Bug fix: the final (reducer) output types were never declared, so
        // the framework would fall back to defaults (LongWritable/Text) and
        // fail with a type mismatch when the reducer emits Text/IntWritable.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setJobName("WordJobStart");

        // Configure the job's input and output paths on HDFS.
        FileInputFormat.addInputPath(job, new Path("/usr/input/"));
        FileSystem fs = FileSystem.get(config);
        Path out = new Path("/usr/output/");

        // MapReduce refuses to run if the output directory already exists,
        // so remove any previous run's results first.
        if (fs.exists(out)) {
            fs.delete(out, true); // second argument = recursive delete
        }
        FileOutputFormat.setOutputPath(job, out);

        // Submit the job and wait for completion (true = stream progress
        // to the console). Propagate failure through the exit code so
        // callers/scripts can detect it — previously the process always
        // exited 0 even when the job failed.
        boolean success = job.waitForCompletion(true);
        if (success) {
            System.out.println("job 运行完毕");
        }
        System.exit(success ? 0 : 1);
    }
}
