package com.jida.hadoop.mr.LCGM11;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Driver for the "LifeCycleJudge" MapReduce job.
 *
 * <p>Feeds the job two kinds of input: the current cycle's sale-user data and,
 * when present, the previous cycle's {@code 11.DxLiveTime} output found by
 * scanning the result directory for 8-digit date subdirectories. Exits 0 on
 * job success, 1 on job failure or any thrown exception.
 */
public class LifeCycleJudgeMain {
    public static void main(String[] args) throws IOException, InterruptedException {
        try {
            // Hard-coded local test arguments; command-line args are currently ignored.
            // arg[0]=cycle length, arg[1]=result base dir, arg[2]=current cycle (yyyyMMdd),
            // arg[3]=input path, arg[4]=output path.
            String[] arg = new String[5];
            arg[0] = "3";
            arg[1] = "file:///D://测试数据//dx";
            arg[2] = "20210601";
            arg[3] = "file:///D://测试数据//dx//10.DxSaleUser";
            arg[4] = "file:///D://测试数据//dx//11.DxLiveTime";
            // Create the job configuration.
            Configuration conf = new Configuration();
            // Map/reduce container memory (MB).
            conf.set("mapreduce.map.memory.mb", "5120");
            conf.set("mapreduce.reduce.memory.mb", "5120");
            // Cycle length, read back by the mapper/reducer via the configuration.
            conf.set("cyc", arg[0]);
            // Task-timeout checking was disabled historically because the cluster was
            // unstable; re-enable only if the job is guaranteed free of infinite loops.
            //conf.set("mapred.task.timeout", "0");
            // For small clusters: whether the client switches datanodes on write failure.
            //conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
            //conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");
            // Parse generic command-line options (currently disabled).
            //String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
            // FIX: Job.getInstance replaces the deprecated new Job(conf, name) constructor.
            Job job = Job.getInstance(conf, "LifeCycleJudgeMain");
            // Required so the job jar can be located when run on the cluster.
            job.setJarByClass(LifeCycleJudgeMain.class);
            // Custom mapper/reducer classes.
            job.setMapperClass(LifeCycleJudgeMapper.class);
            job.setReducerClass(LifeCycleJudgeReducer.class);
            // Map output key/value types.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            // Final output key/value types.
            job.setOutputKeyClass(NullWritable.class);
            job.setOutputValueClass(Text.class);
            // Partitioning / grouping (currently disabled).
            //job.setPartitionerClass(KeyPartitioner.class);
            //job.setGroupingComparatorClass(KeyGroupingComparator.class);
            // FIX: reuse the configured conf instead of a fresh default Configuration,
            // so the FileSystem honours the same settings as the job.
            FileSystem fs = FileSystem.get(conf);
            // Locate the most recent cycle before the current one, if any.
            String pathname = findPreviousCyclePath(fs, arg[1], arg[2]);
            // Feed the previous cycle's statistics back in as extra input.
            if (!"".equals(pathname)) {
                FileInputFormat.addInputPath(job, new Path(pathname + "/11.DxLiveTime"));
            }
            // Main input and output paths.
            FileInputFormat.addInputPath(job, new Path(arg[3]));
            FileOutputFormat.setOutputPath(job, new Path(arg[4]));
            // Submit and exit: 0 on success, 1 on job failure.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
            // FIX: previously fell through and the JVM exited 0 after a failure,
            // making crashes look successful to calling scripts. Signal the error.
            System.exit(1);
        }
    }

    /**
     * Scans {@code baseDir} for subdirectories whose final path segment is an
     * 8-digit date and returns the path of the largest date strictly less than
     * {@code currentCycle}, or the empty string when no such directory exists.
     *
     * @param baseDir      result base directory to scan
     * @param currentCycle current execution cycle as an 8-digit date string
     * @throws IOException if listing the directory fails
     */
    private static String findPreviousCyclePath(FileSystem fs, String baseDir, String currentCycle)
            throws IOException {
        FileStatus[] status = fs.listStatus(new Path(baseDir));
        String pathname = "";
        int datacyc = 0;
        int current = Integer.parseInt(currentCycle);
        for (FileStatus file : status) {
            String pname = file.getPath().toString();
            // Keep only paths ending in an 8-digit date other than the current cycle.
            if (pname.matches(".*\\/(\\d{8})$") && !pname.endsWith(currentCycle)) {
                String[] str = pname.split("\\/");
                // FIX: parse the cycle once instead of three times per path.
                int cycle = Integer.parseInt(str[str.length - 1]);
                // Track the largest cycle that is still before the current one.
                if (cycle > datacyc && cycle < current) {
                    datacyc = cycle;
                    pathname = pname;
                }
            }
        }
        return pathname;
    }
}