package mapReduce.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

import java.io.IOException;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;

/**
 * @Author:lixinlei
 * @Date:2022/3/14 8:43
 **/
public class JobSubmit {

    /** Key/value separator handed to {@code KeyValueLineRecordReader} (default: tab). */
    private static String seperator = "\t";

    /** Number of lines per split used by {@code NLineInputFormat} (default: 1). */
    private static int lineSum = 1;

    /** Maximum split size in bytes for {@code CombineFileInputFormat} (default: 128 MB). */
    private static long maxSplitSize = 134217728;

    public static String getSeperator() {
        return seperator;
    }

    public static void setSeperator(String seperator) {
        JobSubmit.seperator = seperator;
    }

    public static int getLineSum() {
        return lineSum;
    }

    public static void setLineSum(int lineSum) {
        JobSubmit.lineSum = lineSum;
    }

    public static long getMaxSplitSize() {
        return maxSplitSize;
    }

    public static void setMaxSplitSize(long maxSplitSize) {
        JobSubmit.maxSplitSize = maxSplitSize;
    }

    /**
     * Submits a basic job whose Mapper/Reducer are declared as inner classes of the driver.
     *
     * @param driver driver class of the current business logic; its inner classes are
     *               scanned for Mapper/Reducer implementations
     * @param args   main-method arguments: args[0] = input path, args[1] = output path
     * @param objs   optional extra settings, see {@link #submitJobAll}
     */
    public static void submitBaseJob(
            Class driver,
            String[] args,
            Object... objs){
        submitJobByCriticalPoint(driver, args, false, null, objs);
    }

    /**
     * Submits a job, optionally configuring total-order partitioning with automatically
     * determined partition critical points.
     *
     * @param driver     driver class
     * @param args       main-method arguments: args[0] = input path, args[1] = output path
     * @param autoFlag   whether to enable the automatic critical-point logic
     *                   (null is treated as false)
     * @param sampleArgs sampling parameters:
     *                   a String means "skip sampling, use this path as the partition file";
     *                   an Object[]{freq, numSamples, maxSplitsSampled} means "run random
     *                   sampling, compute critical points and write the partition file"
     * @param objs       optional extra settings, see {@link #submitJobAll}
     */
    public static void submitJobByCriticalPoint(
            Class driver,
            String[] args,
            Boolean autoFlag,
            Object sampleArgs,
            Object... objs){
        submitJobAll(driver, args, autoFlag, sampleArgs, null, null, objs);
    }

    /**
     * Submits a chained MapReduce job (ChainMapper / ChainReducer).
     *
     * @param args           main-method arguments: args[0] = input path, args[1] = output path
     * @param mapperClasses  map-side Mapper classes, added to the chain in array order
     * @param reducerClasses the Reducer class plus any reduce-side Mapper classes
     * @param objs           optional extra settings, see {@link #submitJobAll}
     */
    public static void submitChainJob(String[] args, Class[] mapperClasses, Class[] reducerClasses, Object... objs){
        submitJobAll(
                null,
                args,
                false,
                null,
                mapperClasses,
                reducerClasses,
                objs
        );
    }

    /**
     * Central job-building and submission routine.
     *
     * <p>Extra settings accepted through {@code objs} (each element must be a top-level
     * class where a class is expected):
     * <ul>
     *   <li>{@code Integer} — number of reduce tasks</li>
     *   <li>{@code String}  — "sort"/"group" flag routing a following WritableComparator</li>
     *   <li>{@code URI[]}   — distributed-cache files</li>
     *   <li>{@code Class}   — Partitioner, WritableComparator (sort or group comparator
     *       depending on the preceding flag), Combiner (a Reducer subclass),
     *       InputFormat, or OutputFormat</li>
     * </ul>
     *
     * @param driver         driver class; when non-null its inner classes configure the job
     * @param args           args[0] = input path, args[1] = output path (deleted if present)
     * @param autoFlag       enable total-order partitioning setup (null treated as false)
     * @param sampleArgs     sampling parameters, see {@link #submitJobByCriticalPoint}
     * @param mapperClasses  chain-job map-side mappers (used only when driver is null)
     * @param reducerClasses chain-job reducer + reduce-side mappers (used only when driver is null)
     * @param objs           extra settings as described above
     */
    private static void submitJobAll(
            Class driver,
            String[] args,
            Boolean autoFlag,
            Object sampleArgs,
            Class[] mapperClasses,
            Class[] reducerClasses,
            Object... objs){

        try {
            // A dedicated Configuration so callers can rely on per-job settings below.
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);

            if (driver != null) {
                configureFromDriver(job, driver);
            } else if (mapperClasses != null && mapperClasses.length > 0
                    && reducerClasses != null && reducerClasses.length > 0) {
                // driver is null and both class arrays are populated: chained MR.
                configureChain(job, mapperClasses, reducerClasses);
            }

            applyExtraOptions(job, objs);

            // Separator for KeyValueTextInputFormat-style inputs.
            job.getConfiguration().set(
                    KeyValueLineRecordReader.KEY_VALUE_SEPERATOR,
                    getSeperator()
            );

            // Lines per split for NLineInputFormat.
            NLineInputFormat.setNumLinesPerSplit(job, getLineSum());

            // Maximum split size for CombineFileInputFormat.
            CombineFileInputFormat.setMaxInputSplitSize(job, getMaxSplitSize());

            // Input and output paths.
            FileInputFormat.setInputPaths(job, new Path(args[0]));

            FileSystem fs = FileSystem.get(job.getConfiguration());
            Path outputPath = new Path(args[1]);
            // MapReduce refuses to run if the output path exists; remove it up front.
            if (fs.exists(outputPath)) {
                fs.delete(outputPath, true);
            }
            FileOutputFormat.setOutputPath(job, outputPath);

            // FIX: Boolean.TRUE.equals(...) avoids an NPE when callers pass null
            // (the old unboxing "if (autoFlag)" would throw).
            if (Boolean.TRUE.equals(autoFlag)) {
                configureTotalOrder(job, sampleArgs);
            }

            // FIX: surface job failure instead of silently discarding the result.
            if (!job.waitForCompletion(true)) {
                System.err.println("Job " + job.getJobName() + " did not complete successfully.");
            }

        } catch (Exception e) {
            // Best-effort submission utility: report and return, matching original behavior.
            e.printStackTrace();
        }
    }

    /**
     * Configures mapper/reducer classes and their output key/value types by scanning
     * the driver's inner classes.
     */
    private static void configureFromDriver(Job job, Class driver) {
        job.setJarByClass(driver);

        for (Class innerClass : driver.getClasses()) {
            // FIX: only resolve generics for Mapper/Reducer subclasses. The original
            // cast every inner class's generic superclass to ParameterizedType first,
            // which threw ClassCastException for unrelated inner classes (e.g. a
            // custom Writable) or classes with a non-parameterized superclass.
            if (Mapper.class.isAssignableFrom(innerClass)) {
                // Generic order is [KEYIN, VALUEIN, KEYOUT, VALUEOUT].
                Type[] types = getTypesByClass(innerClass);
                job.setMapperClass(innerClass);
                job.setMapOutputKeyClass((Class) types[2]);
                job.setMapOutputValueClass((Class) types[3]);
            } else if (Reducer.class.isAssignableFrom(innerClass)) {
                Type[] types = getTypesByClass(innerClass);
                job.setReducerClass(innerClass);
                job.setOutputKeyClass((Class) types[2]);
                job.setOutputValueClass((Class) types[3]);
            }
        }
    }

    /**
     * Builds a chained MR job: map-side mappers in order, then the reducer, then any
     * reduce-side mappers (which must be registered after the reducer).
     */
    private static void configureChain(Job job, Class[] mapperClasses, Class[] reducerClasses)
            throws IOException {

        // Map-side chain, in the given order.
        for (Class mapperClass : mapperClasses) {
            Type[] types = getTypesByClass(mapperClass);
            ChainMapper.addMapper(
                    job,
                    mapperClass,
                    (Class) types[0],
                    (Class) types[1],
                    (Class) types[2],
                    (Class) types[3],
                    job.getConfiguration()
            );
        }

        // Reduce-side mappers must be added AFTER ChainReducer.setReducer, so buffer
        // them while locating the actual Reducer (tolerates any array order).
        List<Class> reduceSideMappers = new ArrayList<Class>();

        for (Class reducerClass : reducerClasses) {
            if (Mapper.class.isAssignableFrom(reducerClass)) {
                reduceSideMappers.add(reducerClass);
            } else if (Reducer.class.isAssignableFrom(reducerClass)) {
                Type[] types = getTypesByClass(reducerClass);
                ChainReducer.setReducer(
                        job,
                        reducerClass,
                        (Class) types[0],
                        (Class) types[1],
                        (Class) types[2],
                        (Class) types[3],
                        job.getConfiguration()
                );
            }
        }

        for (Class reduceSideMapper : reduceSideMappers) {
            Type[] types = getTypesByClass(reduceSideMapper);
            ChainReducer.addMapper(
                    job,
                    reduceSideMapper,
                    (Class) types[0],
                    (Class) types[1],
                    (Class) types[2],
                    (Class) types[3],
                    job.getConfiguration()
            );
        }
    }

    /**
     * Applies the variable-length extra settings to the job. A String element switches
     * how a subsequent WritableComparator is interpreted ("sort" = sort comparator,
     * "group" = grouping comparator); the flag must appear BEFORE the comparator class
     * it applies to.
     */
    private static void applyExtraOptions(Job job, Object[] objs) {
        String sortOrGroupFlag = "sort";

        for (Object obj : objs) {
            if (obj instanceof Integer) {
                // Number of reduce tasks.
                job.setNumReduceTasks((Integer) obj);
            } else if (obj instanceof String) {
                sortOrGroupFlag = (String) obj;
            } else if (obj instanceof URI[]) {
                // Distributed-cache files.
                job.setCacheFiles((URI[]) obj);
            } else if (obj instanceof Class) {
                // FIX: guard with instanceof before casting. The original cast every
                // unmatched element to Class, so any other argument type caused a
                // ClassCastException.
                Class cls = (Class) obj;
                if (Partitioner.class.isAssignableFrom(cls)) {
                    job.setPartitionerClass(cls);
                } else if (WritableComparator.class.isAssignableFrom(cls)) {
                    if ("sort".equals(sortOrGroupFlag)) {
                        job.setSortComparatorClass(cls);
                    } else if ("group".equals(sortOrGroupFlag)) {
                        job.setGroupingComparatorClass(cls);
                    }
                } else if (Reducer.class.isAssignableFrom(cls)) {
                    // A Reducer subclass passed here acts as the Combiner.
                    job.setCombinerClass(cls);
                } else if (InputFormat.class.isAssignableFrom(cls)) {
                    job.setInputFormatClass(cls);
                } else if (OutputFormat.class.isAssignableFrom(cls)) {
                    job.setOutputFormatClass(cls);
                }
            }
        }
    }

    /**
     * Sets up total-order partitioning: sequence-file input, TotalOrderPartitioner,
     * and either random sampling (Object[]{freq, numSamples, maxSplitsSampled}) or a
     * pre-existing partition file (String path).
     */
    private static void configureTotalOrder(Job job, Object sampleArgs)
            throws IOException, ClassNotFoundException, InterruptedException {

        // Random sampling reads sequence files, so force the input format.
        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setPartitionerClass(TotalOrderPartitioner.class);
        // The reduce-task count (= partition count) can be supplied via the
        // Integer element of the Object... settings.

        if (sampleArgs instanceof Object[]) {
            Object[] sampleArray = (Object[]) sampleArgs;

            InputSampler.Sampler sampler =
                    new InputSampler.RandomSampler(
                            (Double) sampleArray[0],
                            (Integer) sampleArray[1],
                            (Integer) sampleArray[2]
                    );

            // Compute partition critical points from the sample and write them
            // to the partition file.
            InputSampler.writePartitionFile(job, sampler);

        } else if (sampleArgs instanceof String) {
            // A plain path: use an existing partition file directly.
            TotalOrderPartitioner.setPartitionFile(job.getConfiguration(), new Path((String) sampleArgs));
        }
    }

    /**
     * Returns the actual generic type arguments of {@code cla}'s direct superclass.
     * For Mapper/Reducer subclasses the convention is [KEYIN, VALUEIN, KEYOUT, VALUEOUT].
     *
     * @throws IllegalArgumentException if the direct superclass carries no type
     *                                  arguments (previously a raw ClassCastException)
     */
    private static Type[] getTypesByClass(Class cla) {
        Type superType = cla.getGenericSuperclass();
        if (!(superType instanceof ParameterizedType)) {
            throw new IllegalArgumentException(
                    cla.getName() + " must directly extend a parameterized Mapper/Reducer");
        }
        return ((ParameterizedType) superType).getActualTypeArguments();
    }

}
