package com.iwy.hadoop.mapreduce;

import com.iwy.hadoop.hdfs.HDFSClient;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Driver that configures and submits the word-count MapReduce job.
 *
 * @author weibi
 * @since 2021/1/21
 */
public class WordcountDriver {

    /**
     * Configures and runs the word-count job, blocking until it finishes.
     *
     * <p>Exits the JVM with status 0 on success and 1 on failure so that
     * calling shell scripts can detect a failed job.
     *
     * @param args optional overrides: {@code args[0]} = input path,
     *             {@code args[1]} = output path; when absent, the local
     *             development defaults below are used
     * @throws IOException            if job setup or filesystem access fails
     * @throws ClassNotFoundException if a job class cannot be resolved
     * @throws InterruptedException   if interrupted while waiting for the job
     */
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {

        Configuration cfg = new Configuration();
        // Runs against the local filesystem by default. To target a cluster,
        // set fs.defaultFS, e.g.: cfg.set("fs.defaultFS", "hdfs://hadoop101:9000");

        // Obtain the job instance (named, so it is identifiable in the job UI)
        Job job = Job.getInstance(cfg, "word count");

        // Resolve the job jar from the driver class
        job.setJarByClass(WordcountDriver.class);

        // Wire up the Mapper and Reducer implementations
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);

        // Key/value types emitted by the map phase
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Key/value types of the final (reduce) output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Input/output paths: command-line args take precedence; otherwise
        // fall back to the original local development defaults.
        // NOTE(review): the default input contains a literal "{input/*}" —
        // Hadoop expands {...} as a glob alternation at runtime, so this
        // resolves to input/*; confirm it matches the local directory layout.
        String inputPath = args.length > 0
                ? args[0]
                : "D:\\hadoop-wk\\hadoop-root\\hadoop-test\\{input/*}";
        String outputPath = args.length > 1
                ? args[1]
                : "D:\\hadoop-wk\\hadoop-root\\hadoop-test\\output";
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Submit and wait; propagate the job's outcome as the process exit
        // code instead of silently discarding it (the original ignored it).
        boolean succeeded = job.waitForCompletion(true);
        System.exit(succeeded ? 0 : 1);
    }
}
