package com.chief.hadoop.mr.outputFormat.dependent;


import com.chief.hadoop.mr.outputFormat.FlowBean;
import com.chief.hadoop.mr.outputFormat.FlowMapper;
import com.chief.hadoop.mr.outputFormat.FlowOutPutFormat;
import com.chief.hadoop.mr.outputFormat.FlowReduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

//Put mobile numbers into sjh.txt and landline numbers into gh.txt; count each number's total
//traffic consumption and number of occurrences, sort descending by traffic then count,
//output format: XXXX-----123-----1
public class FlowDriver {

    /** Pause between polls of the JobControl state so the monitor loop does not busy-spin. */
    private static final long POLL_INTERVAL_MS = 500L;

    /**
     * Entry point: wires job1 -> job2 as a dependency chain via {@link JobControl},
     * runs the controller on a background thread, and polls until the whole chain
     * finishes or any job fails.
     *
     * @throws IOException            on HDFS/viewfs access or job setup failure
     * @throws InterruptedException   if the polling sleep is interrupted
     * @throws ClassNotFoundException declared for MapReduce submission APIs
     * @throws URISyntaxException     if the viewfs URI is malformed
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException, URISyntaxException {
        Configuration configuration = new Configuration();
        // Ship the job jar explicitly and allow submission from a Windows client to a Linux cluster.
        configuration.set("mapreduce.job.jar", "E:\\bigdata-start\\hadoop3\\hadoopCode\\target\\hadoopCode-1.0-SNAPSHOT.jar");
        configuration.set("mapreduce.app-submission.cross-platform", "true");

        // Remove leftovers from a previous run so FileOutputFormat does not abort on existing output.
        // NOTE(review): the class comment mentions "sjh.txt" but "/data1/sj.txt" is deleted (and read
        // by job2) here — confirm which file name FlowOutPutFormat actually writes.
        FileSystem fileSystem = ViewFileSystem.newInstance(new URI("viewfs://mycluster/"), configuration);
        fileSystem.delete(new Path("/data1/sj.txt"), true);
        fileSystem.delete(new Path("/data1/gh.txt"), true);
        fileSystem.delete(new Path("/data1/dependentJob1Out"), true);
        fileSystem.delete(new Path("/data1/dependentJob2Out"), true);

        Job job1 = getJob1(configuration);
        Job job2 = getJob2(configuration);

        ControlledJob controlledJob1 = new ControlledJob(job1.getConfiguration());
        controlledJob1.setJob(job1);

        ControlledJob controlledJob2 = new ControlledJob(job2.getConfiguration());
        controlledJob2.setJob(job2);

        // job2 reads the files produced by job1's output format, so it must wait for job1.
        controlledJob2.addDependingJob(controlledJob1);

        JobControl all = new JobControl("all");
        all.addJob(controlledJob1);
        all.addJob(controlledJob2);

        Thread thread = new Thread(all);
        thread.setDaemon(true); // don't keep the JVM alive if main breaks out of the loop early
        thread.start();

        while (true) {
            if (all.allFinished()) {
                System.out.println(all.getSuccessfulJobList());
                all.stop();
                break;
            }
            if (!all.getFailedJobList().isEmpty()) {
                System.out.println(all.getFailedJobList());
                all.stop();
                break;
            }
            // FIX: the original loop polled with no pause, pinning a CPU core at 100%.
            Thread.sleep(POLL_INTERVAL_MS);
        }
    }

    /**
     * Builds the second job: re-aggregates job1's output files (sj.txt / gh.txt) using the
     * mapper/reducer from the {@code dependent} sub-package (FlowBean key for sorting) and
     * writes via the custom {@link FlowOutPutFormat}.
     *
     * @param configuration shared cluster configuration
     * @return the configured (not yet submitted) Job
     * @throws IOException if job creation fails
     */
    private static Job getJob2(Configuration configuration) throws IOException {
        Job job = Job.getInstance(configuration, "job2");
        job.setJarByClass(FlowDriver.class);

        // Fully qualified on purpose: the imported FlowMapper/FlowReduce are the job1 variants.
        job.setMapperClass(com.chief.hadoop.mr.outputFormat.dependent.FlowMapper.class);
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(com.chief.hadoop.mr.outputFormat.dependent.FlowReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        job.setOutputFormatClass(FlowOutPutFormat.class);

        FileInputFormat.setInputPaths(job, new Path("/data1/sj.txt"), new Path("/data1/gh.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/data1/dependentJob2Out"));
        return job;
    }

    /**
     * Builds the first job: reads the raw {@code outputFormat*.txt} samples and splits/aggregates
     * them with the outputFormat-package mapper/reducer, writing through {@link FlowOutPutFormat}.
     *
     * @param configuration shared cluster configuration
     * @return the configured (not yet submitted) Job
     * @throws IOException if job creation fails
     */
    private static Job getJob1(Configuration configuration) throws IOException {
        Job job = Job.getInstance(configuration, "job1");
        job.setJarByClass(FlowDriver.class);

        job.setMapperClass(FlowMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        job.setReducerClass(FlowReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        job.setOutputFormatClass(FlowOutPutFormat.class);

        FileInputFormat.setInputPaths(job, new Path("/data1/outputFormat*.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/data1/dependentJob1Out"));
        return job;
    }

    /**
     * One-off helper: uploads the same local sample file three times under different names
     * so job1's input glob {@code /data1/outputFormat*.txt} matches multiple files.
     * Overwrites existing targets; keeps the local copy.
     */
    @Test
    public void upload() throws URISyntaxException, IOException {
        FileSystem fileSystem = ViewFileSystem.newInstance(new URI("viewfs://mycluster/"), new Configuration());
        Path local = new Path("E:\\bigdata-start\\hadoop3\\hadoopCode\\src\\main\\java\\com\\chief\\hadoop\\mr\\outputFormat\\flow.txt");
        for (int i = 1; i <= 3; i++) {
            // delSrc=false keeps the local file, overwrite=true replaces any previous upload.
            fileSystem.copyFromLocalFile(false, true, local, new Path("/data1/outputFormat" + i + ".txt"));
        }
    }


}
