package com.chief.hadoop.mr.zip;

import com.chief.hadoop.mr.join.OrderBean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * Driver for a map-side join MapReduce job: joins order records against a small
 * "goods" lookup table distributed to every task via the cache file mechanism.
 * Intermediate (map) and final (reduce) outputs are both BZip2-compressed.
 *
 * <p>Submitted remotely from a Windows workstation against a federated
 * ("viewfs://mycluster/") HDFS; paths and the job jar location are hard-coded
 * for that environment.
 */
public class OrderDriver {

    /**
     * Configures and submits the join job, then exits with the job's status
     * (0 on success, 1 on failure).
     *
     * @throws IOException            on HDFS or job-submission I/O failure
     * @throws InterruptedException   if the job wait is interrupted
     * @throws ClassNotFoundException if a job class cannot be resolved on the cluster
     * @throws URISyntaxException     if the viewfs URI is malformed
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException, URISyntaxException {
        Configuration configuration = new Configuration();
        // Jar shipped to the cluster; required for cross-platform (Windows -> Linux) submission.
        configuration.set("mapreduce.job.jar", "E:\\bigdata-start\\hadoop3\\hadoopCode\\target\\hadoopCode-1.0-SNAPSHOT.jar");
        configuration.set("mapreduce.app-submission.cross-platform", "true");

        // Enable compression of intermediate map output.
        configuration.setBoolean("mapreduce.map.output.compress", true);
        // Compress map output with BZip2.
        configuration.setClass("mapreduce.map.output.compress.codec", BZip2Codec.class, CompressionCodec.class);

        // newInstance() returns an uncached FileSystem owned by this caller, so it must be
        // closed here; closing it does not disturb instances the job framework obtains later.
        try (FileSystem fileSystem = ViewFileSystem.newInstance(new URI("viewfs://mycluster/"), configuration)) {
            // Remove a stale output directory, otherwise submission fails with FileAlreadyExistsException.
            fileSystem.delete(new Path("/data1/mapJoinOut"), true);
        }

        Job job = Job.getInstance(configuration, "all");

        job.setJarByClass(OrderDriver.class);

        job.setMapperClass(OrderMapper.class);
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(OrderBean.class);

        // Distribute the small goods table to every task for the map-side join.
        job.addCacheFile(URI.create("/data1/goods.txt"));

        // The reducer is optional for the join itself, but without it the output is unsorted.
        job.setReducerClass(OrderReduce.class);
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.setInputPaths(job, new Path("/data1/order.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/data1/mapJoinOut"));

        // Enable compression of the final (reduce-side) output.
        FileOutputFormat.setCompressOutput(job, true);
        // Compress final output with BZip2.
        FileOutputFormat.setOutputCompressorClass(job, BZip2Codec.class);

        boolean succeeded = job.waitForCompletion(true);
        System.exit(succeeded ? 0 : 1);
    }

    /**
     * One-off helper to stage the local test fixtures (goods and order files)
     * into the cluster paths the job reads from. Run manually before {@link #main}.
     *
     * @throws URISyntaxException if the viewfs URI is malformed
     * @throws IOException        on copy failure
     */
    @Test
    public void upload() throws URISyntaxException, IOException {
        // Close the uncached FileSystem when done to avoid leaking the connection.
        try (FileSystem fileSystem = ViewFileSystem.newInstance(new URI("viewfs://mycluster/"), new Configuration())) {
            // delSrc=false keeps the local copies; overwrite=true replaces stale cluster copies.
            fileSystem.copyFromLocalFile(false, true, new Path("E:\\bigdata-start\\hadoop3\\hadoopCode\\src\\main\\java\\com\\chief\\hadoop\\mr\\join\\goods.txt"), new Path(("/data1/goods.txt")));
            fileSystem.copyFromLocalFile(false, true, new Path("E:\\bigdata-start\\hadoop3\\hadoopCode\\src\\main\\java\\com\\chief\\hadoop\\mr\\join\\order.txt"), new Path(("/data1/order.txt")));
        }
    }
}


