package com.chief.hadoop.mr.outputFormat.chain;


import com.chief.hadoop.mr.outputFormat.FlowBean;
import com.chief.hadoop.mr.outputFormat.FlowMapper;
import com.chief.hadoop.mr.outputFormat.FlowOutPutFormat;
import com.chief.hadoop.mr.outputFormat.FlowReduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

//Put mobile numbers into sjh.txt and landlines into gh.txt; for each number, total its traffic and count its occurrences, sorted descending by traffic then count, formatted as XXXX-----123-----1
/**
 * Driver for a chained flow-statistics MapReduce job.
 *
 * <p>Pipeline (single job): {@code ChainMapper(FlowMapper)} feeds
 * {@code ChainReducer(FlowReduce)} whose output is post-processed by a second,
 * reduce-side {@code chain.FlowMapper}; results are written through the custom
 * {@link FlowOutPutFormat}. Input is read from {@code /data1/outputFormat*.txt}
 * on the {@code viewfs://mycluster/} federated file system.
 */
public class FlowDriver {

    /**
     * Cleans previous outputs, configures the chained job, and blocks until it
     * finishes; exits with 0 on success, 1 on failure.
     *
     * @throws IOException            on HDFS/job-submission I/O errors
     * @throws InterruptedException   if the wait for completion is interrupted
     * @throws ClassNotFoundException if a job class cannot be resolved at submit time
     * @throws URISyntaxException     if the viewfs URI is malformed (never in practice)
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException, URISyntaxException {
        Configuration configuration = new Configuration();
        // Ship the pre-built job jar so cluster nodes can load our classes
        // (submitting from an IDE, not via `hadoop jar`).
        configuration.set("mapreduce.job.jar", "E:\\bigdata-start\\hadoop3\\hadoopCode\\target\\hadoopCode-1.0-SNAPSHOT.jar");
        // Required when submitting from Windows to a Linux cluster.
        configuration.set("mapreduce.app-submission.cross-platform", "true");

        // FIX: FileSystem.newInstance() bypasses the FS cache, so the caller owns
        // the instance and must close it — use try-with-resources to avoid leaking
        // the underlying connections. Deletes are best-effort cleanup of stale
        // inputs/outputs from earlier runs.
        try (FileSystem fileSystem = ViewFileSystem.newInstance(new URI("viewfs://mycluster/"), configuration)) {
            fileSystem.delete(new Path("/data1/sj.txt"), true);
            fileSystem.delete(new Path("/data1/gh.txt"), true);
            fileSystem.delete(new Path("/data1/dependentJob1Out"), true);
            fileSystem.delete(new Path("/data1/dependentJob2Out"), true);
        }

        Job job = Job.getInstance(configuration, "all");
        job.setJarByClass(FlowDriver.class);
        job.setOutputFormatClass(FlowOutPutFormat.class);

        // Map stage: LongWritable/Text (file offset/line) -> Text/FlowBean.
        // A fresh Configuration(false) keeps per-stage settings isolated, per the
        // ChainMapper/ChainReducer contract.
        ChainMapper.addMapper(job, FlowMapper.class, LongWritable.class, Text.class, Text.class, FlowBean.class, new Configuration(false));

        // Reduce stage: Text/FlowBean -> Text/FlowBean.
        ChainReducer.setReducer(job, FlowReduce.class, Text.class, FlowBean.class, Text.class, FlowBean.class, new Configuration(false));

        // Post-reduce mapper (note: the chain-package FlowMapper, fully qualified
        // to disambiguate from the map-stage FlowMapper imported above).
        ChainReducer.addMapper(job, com.chief.hadoop.mr.outputFormat.chain.FlowMapper.class, Text.class, FlowBean.class, Text.class, FlowBean.class
                , new Configuration(false));

        FileInputFormat.setInputPaths(job, new Path("/data1/outputFormat*.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/data1/dependentJob1Out"));

        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }

    /**
     * Test helper: uploads the local {@code flow.txt} fixture three times as
     * {@code /data1/outputFormat1.txt} … {@code outputFormat3.txt}, overwriting
     * any existing copies, to serve as job input.
     */
    @Test
    public void upload() throws URISyntaxException, IOException {
        Path localFlowFile = new Path("E:\\bigdata-start\\hadoop3\\hadoopCode\\src\\main\\java\\com\\chief\\hadoop\\mr\\outputFormat\\flow.txt");
        // FIX: close the dedicated FileSystem instance (see main); the repeated
        // copy calls are collapsed into a loop — target paths are unchanged.
        try (FileSystem fileSystem = ViewFileSystem.newInstance(new URI("viewfs://mycluster/"), new Configuration())) {
            for (int i = 1; i <= 3; i++) {
                // delSrc=false (keep the local file), overwrite=true.
                fileSystem.copyFromLocalFile(false, true, localFlowFile, new Path("/data1/outputFormat" + i + ".txt"));
            }
        }
    }

}
