package base.test1.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class MyDrive {

    /** Base URI of the target HDFS cluster; all job paths are resolved against it. */
    private static final String HDFS_URI = "hdfs://192.168.7.100:9000";

    /**
     * Configures and submits the word-count MapReduce job, then exits with
     * status 0 on success or 1 on failure.
     *
     * @param args unused; input/output paths are fixed under {@link #HDFS_URI}
     * @throws IOException            on HDFS access or job-submission failure
     * @throws InterruptedException   if the wait for job completion is interrupted
     * @throws ClassNotFoundException if a job class cannot be located at submission
     * @throws URISyntaxException     if {@link #HDFS_URI} is malformed
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException, URISyntaxException {

        // Single shared Configuration for both the job and the FileSystem client.
        Configuration conf = new Configuration();

        // 1. Create the job.
        Job job = Job.getInstance(conf);

        // 2. Ship the jar containing this driver class to the cluster.
        job.setJarByClass(MyDrive.class);

        // 3. Mapper implementation.
        job.setMapperClass(MyMapper.class);

        // 4. Reducer implementation.
        job.setReducerClass(MyReduce.class);

        // 5. Output types: map output (k2,v2) and final job output (k3,v3).
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6. Input and output locations on HDFS.
        Path input = new Path(HDFS_URI + "/wordcount/input");
        Path output = new Path(HDFS_URI + "/wordcount/output");
        FileInputFormat.addInputPath(job, input);
        FileOutputFormat.setOutputPath(job, output);

        // 7. MapReduce refuses to start if the output directory already exists,
        //    so remove any leftover from a previous run before submitting.
        FileSystem fs = FileSystem.get(new URI(HDFS_URI), conf);
        if (fs.exists(output)) {
            fs.delete(output, true); // true = recursive delete
        }

        // 8. Submit the job, block until it finishes (verbose progress on),
        //    and map the outcome to the process exit code.
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}
