package org.example.xinguan;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.example.NumberInfo;
import org.example.ResultXinguan;

/**
 * MapReduce driver for the "xinguan" statistics job.
 *
 * <p>Wires {@link XinguanMapper} (emitting {@code Text -> NumberInfo}) to
 * {@link XinguanReduce} (emitting {@code Text -> ResultXinguan}) over plain-text
 * input, then submits the job and waits for completion.
 *
 * <p>Usage: {@code XinguanMain <input path> <output path>} — both paths are
 * resolved against the HDFS NameNode URI (see {@link #HDFS_URI_KEY}).
 */
public class XinguanMain extends Configured implements Tool {

    /** Configuration key for the NameNode URI prefixed onto both paths. */
    private static final String HDFS_URI_KEY = "xinguan.hdfs.uri";

    /** Default NameNode URI, kept for backward compatibility with the original hard-coded value. */
    private static final String DEFAULT_HDFS_URI = "hdfs://192.168.137.128:9000";

    /**
     * Configures and runs the MapReduce job.
     *
     * @param args {@code args[0]} = HDFS input path, {@code args[1]} = HDFS output path
     *             (the output directory must not already exist)
     * @return 0 on success, -1 on bad arguments, 1 on job failure
     * @throws Exception if job setup or submission fails
     */
    @Override
    public int run(String[] args) throws Exception {
        // Validate the command line: exactly an input path and an output path.
        if (args.length != 2) {
            // Diagnostics belong on stderr so they are not mixed into piped stdout.
            System.err.println("Usage: XinguanMain <input path> <output path>");
            return -1;
        }
        String inputPath = args[0];
        String outputPath = args[1];

        // Resolve the NameNode URI from configuration; defaults to the original
        // hard-coded address, so existing invocations are unaffected. Can be
        // overridden with -D xinguan.hdfs.uri=... thanks to ToolRunner's
        // GenericOptionsParser populating getConf().
        Configuration conf = getConf();
        String hdfsUri = conf.get(HDFS_URI_KEY, DEFAULT_HDFS_URI);

        // Create the job instance and identify the jar by this driver class.
        Job job = Job.getInstance(conf, "mapreduce_calculation");
        job.setJarByClass(XinguanMain.class);

        // Plain-text input and output, addressed on the configured HDFS cluster.
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path(hdfsUri + inputPath));
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(hdfsUri + outputPath));

        // Map stage: emits Text keys with NumberInfo values.
        job.setMapperClass(XinguanMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NumberInfo.class);

        // Reduce stage: aggregates into Text keys with ResultXinguan values.
        job.setReducerClass(XinguanReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(ResultXinguan.class);

        // Block until the job finishes; 'true' enables progress reporting.
        return job.waitForCompletion(true) ? 0 : 1;
    }

    /**
     * Entry point. Delegates to {@link ToolRunner} so generic Hadoop options
     * ({@code -D}, {@code -files}, ...) are parsed, and propagates the job's
     * exit status to the JVM so callers (shell scripts, schedulers) can detect
     * failure — the original version silently discarded it and always exited 0.
     */
    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new Configuration(), new XinguanMain(), args);
        System.exit(exitCode);
    }
}
