package com.shujia.mr.kqzl2;

import com.shujia.mr.kqzl1.KqzkDemo1;
import com.shujia.mr.kqzl1.KqzlMapper;
import com.shujia.mr.kqzl1.KqzlReducer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/*
    输入的数据：
        20180101-1279   128
        1279,高新区,合肥

    reduce之前的分组数据：
        <"1279","#合肥-高新区">
        <"1279","$20180101-128">
        <"1279","$20180102-24">
        <"1279","$20180103-241">
        <"1279","$20180104-217">
        ...
        分组后：
        <"1279", ["$20180103-241","$20180104-217","#合肥-高新区","$20180101-128","$20180102-24"]>

 */
public class KqzlDemo2 {
    /**
     * Driver for air-quality case 2: joins the per-district aggregated readings
     * (output of case 1) with the district-to-city mapping file.
     *
     * Expected arguments:
     *   args[0] - HDFS path of the aggregated readings (e.g. /bigdata33/output/out10/part-r-00000)
     *   args[1] - HDFS path of the city mapping file   (e.g. /bigdata33/data/city.csv)
     *   args[2] - HDFS output directory (must not already exist)
     *
     * Exits with status 0 on success, 1 on job failure or unexpected error,
     * 2 on bad usage — so shell scripts and schedulers can detect failures.
     */
    public static void main(String[] args) {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 3) {
            System.err.println("Usage: KqzlDemo2 <readings input> <city input> <output dir>");
            System.exit(2);
        }
        try {
            // Hadoop configuration (picks up *-site.xml from the classpath).
            Configuration conf = new Configuration();

            // Create the MapReduce job and give it a recognizable name.
            Job job = Job.getInstance(conf);
            job.setJobName("33期空气质量案例2");
            // Reducer count left at the cluster default.
//            job.setNumReduceTasks(2);

            // Main class so YARN can locate the containing jar.
            job.setJarByClass(KqzlDemo2.class);

            // Mapper / reducer implementations for this case.
            job.setMapperClass(KqzlMapper2.class);
            job.setReducerClass(KqzlReducer2.class);

            // Map-side output key/value types.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);

            // Reduce-side (final) output key/value types.
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);

            // Two input paths: the aggregated readings and the city mapping file.
            FileInputFormat.setInputPaths(job, new Path(args[0]), new Path(args[1]));

            // HDFS output directory for the join result.
            FileOutputFormat.setOutputPath(job, new Path(args[2]));

            // Submit the job and block until it finishes (true = print progress).
            boolean success = job.waitForCompletion(true);

            if (success) {
                System.out.println("========33期空气质量案例2执行成功！！！！========");
                System.out.println("输出结果目标在hdfs路径：" + args[2]);
            } else {
                System.out.println("-------------- 执行失败！！ -----------------");
            }
            // Propagate the job result as the process exit code; previously the
            // driver always exited 0, hiding failures from callers.
            System.exit(success ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
            System.exit(1);
        }
    }
}
