package avgCitySalary;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


import java.io.IOException;

public class AvgSalaryDriver {

    /** Input file used when no command-line arguments are supplied. */
    private static final String DEFAULT_INPUT = "input/jobData.csv";

    /** Output directory used when no command-line arguments are supplied. */
    private static final String DEFAULT_OUTPUT = "output/avgCitySalary";

    /**
     * Configures and submits the average-city-salary MapReduce job, then exits
     * with status 0 on success or 1 on failure.
     *
     * @param args optional: {@code args[0]} = input path, {@code args[1]} =
     *             output path; defaults are used when arguments are absent,
     *             so existing invocations without arguments keep working
     * @throws IOException            if the filesystem or job setup fails
     * @throws ClassNotFoundException if a job class cannot be resolved
     * @throws InterruptedException   if the job is interrupted while waiting
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(AvgSalaryDriver.class);
        job.setMapperClass(AvgSalaryMapper.class);
        job.setReducerClass(AvgSalaryReducer.class);

        // Map and final (reduce) output types are both Text -> DoubleWritable.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DoubleWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);

        // Custom partitioner that routes city keys to specific reducers.
        job.setPartitionerClass(CityAvgSalaryPartition.class);
        // Reducer count = partition count: must match the logic inside
        // CityAvgSalaryPartition. Set to 14 here (the original comment
        // claimed 2, which disagreed with the code — TODO confirm 14
        // matches the partitioner's implementation).
        job.setNumReduceTasks(14);

        // Resolve paths from the command line, falling back to the defaults.
        Path inputPath = new Path(args.length > 0 ? args[0] : DEFAULT_INPUT);
        Path outputPath = new Path(args.length > 1 ? args[1] : DEFAULT_OUTPUT);

        FileInputFormat.addInputPath(job, inputPath);

        // Hadoop refuses to run if the output directory already exists,
        // so remove any stale results first (true = recursive delete).
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }

        FileOutputFormat.setOutputPath(job, outputPath);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

