package com.mapreduce;

import com.bean.AvgTemp;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.net.URI;

public class JobControlMapReduce
        extends Configured implements Tool {

    // Namenode URI shared by every input/output path below.
    private static final String HDFS_URI = "hdfs://192.168.10.11:9000";
    // Poll interval while waiting for the controlled job flow to finish.
    private static final long POLL_INTERVAL_MS = 500L;

    /**
     * Chains two MapReduce jobs with {@link JobControl}:
     * job1 ("avg1") aggregates per-key values from comma-separated text input
     * into a SequenceFile; job2 ("avg2", map-only) reads that SequenceFile and
     * writes the final (Text, DoubleWritable) results as plain text.
     *
     * @param args unused; all paths are hard-coded against {@code HDFS_URI}
     * @return 0 when every controlled job succeeded, 1 if any job failed
     * @throws Exception on HDFS access or job-submission failure
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        // KeyValueTextInputFormat splits each input line on the first ',' into key/value.
        conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", ",");

        // Input/output paths for the two-stage flow.
        Path input1 = new Path(HDFS_URI + "/sortdata");
        Path output1 = new Path(HDFS_URI + "/snoutput");
        Path output2 = new Path(HDFS_URI + "/avgoutput");

        // Remove stale outputs up front so neither job fails with "output already exists".
        FileSystem fs = FileSystem.get(new URI(HDFS_URI), conf);
        if (fs.exists(output1)) fs.delete(output1, true);
        if (fs.exists(output2)) fs.delete(output2, true);

        Job job1 = buildSumJob(conf, input1, output1);
        Job job2 = buildAvgJob(conf, output1, output2);

        // Wrap both jobs so JobControl can order them: job2 runs only after job1 succeeds.
        ControlledJob cj1 = new ControlledJob(conf);
        cj1.setJob(job1);
        ControlledJob cj2 = new ControlledJob(conf);
        cj2.setJob(job2);
        cj2.addDependingJob(cj1);

        JobControl jc = new JobControl("avgtemp");
        jc.addJob(cj1);
        jc.addJob(cj2);

        // JobControl is a Runnable scheduler; drive it from a background thread and poll it.
        Thread thread = new Thread(jc);
        thread.setDaemon(true); // do not keep the JVM alive if we exit abnormally
        thread.start();
        try {
            while (!jc.allFinished()) {
                // Print progress for whichever job is currently running.
                for (ControlledJob job : jc.getRunningJobList()) {
                    job.getJob().monitorAndPrintJob();
                }
                // Avoid a busy spin while no job is runnable yet (the original
                // while(true) loop burned CPU between job states).
                Thread.sleep(POLL_INTERVAL_MS);
            }
        } finally {
            // Stop the JobControl scheduler thread once we are done (or interrupted).
            jc.stop();
        }

        // Propagate failure to the caller instead of unconditionally reporting success.
        return jc.getFailedJobList().isEmpty() ? 0 : 1;
    }

    /**
     * Builds job1 ("avg1"): comma-separated key/value text input reduced to
     * (Text, AvgTemp) aggregates, written as a SequenceFile for job2 to consume.
     */
    private Job buildSumJob(Configuration conf, Path input, Path output) throws Exception {
        Job job = Job.getInstance(conf);
        job.setJobName("avg1");
        job.setJarByClass(this.getClass());

        // Map stage configuration.
        job.setMapperClass(JobNumSum.JobNumSumMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(AvgTemp.class);

        // Reduce stage configuration.
        job.setReducerClass(JobNumSum.JobNumSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(AvgTemp.class);

        job.setInputFormatClass(KeyValueTextInputFormat.class);
        KeyValueTextInputFormat.addInputPath(job, input);

        // SequenceFile output preserves the typed (Text, AvgTemp) pairs for job2.
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        SequenceFileOutputFormat.setOutputPath(job, output);
        return job;
    }

    /**
     * Builds job2 ("avg2"): map-only job that reads job1's SequenceFile and
     * emits (Text, DoubleWritable) averages via the default TextOutputFormat.
     */
    private Job buildAvgJob(Configuration conf, Path input, Path output) throws Exception {
        Job job = Job.getInstance(conf);
        job.setJobName("avg2");
        job.setJarByClass(this.getClass());

        job.setMapperClass(JobAvg.JobAvgMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DoubleWritable.class);

        // Map-only: with zero reducers, mapper output goes straight to the output format.
        job.setNumReduceTasks(0);

        job.setInputFormatClass(SequenceFileInputFormat.class);
        SequenceFileInputFormat.addInputPath(job, input);

        // TextOutputFormat is the framework default; only the path needs to be set.
        TextOutputFormat.setOutputPath(job, output);
        return job;
    }

    /** CLI entry point; ToolRunner parses generic options (-D, -fs, …) into the Configuration. */
    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new JobControlMapReduce(), args));
    }
}
