/**
 * Controller class that launches a Hadoop job writing the hot-section list into HDFS.
 */
package com.liyuan.controller;

import com.liyuan.entity.ArrayInputFormat;
import com.liyuan.entity.HotSection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.Logger;

import java.io.IOException;
import java.time.LocalDate;
import java.util.concurrent.ThreadLocalRandom;

public class WriteHDFSDriver {
    // Bug fix: logger was mistakenly bound to ArrayInputFormat.class, which
    // misattributed every log line from this driver. Bind it to this class.
    private static final Logger LOG = Logger.getLogger(WriteHDFSDriver.class);

    /**
     * Creates, configures, and runs a map-only Hadoop job that writes the given
     * hot-section list into HDFS. The list is passed to tasks through the job
     * configuration under the key {@code array.data} and consumed by
     * {@link ArrayInputFormat}. Output goes to a per-run directory named
     * {@code /123/yyyyMMdd/<epoch-millis>} on the local HDFS instance.
     *
     * @param hotLists hot-section list as a string array; stored in the job
     *                 configuration for the mapper tasks to read
     * @throws IOException            if a filesystem operation fails, or if the
     *                                job completes unsuccessfully
     * @throws InterruptedException   if the job execution is interrupted
     * @throws ClassNotFoundException if a class referenced by the job cannot be loaded
     */
    public void writeHdfsJob(String[] hotLists) throws IOException, InterruptedException, ClassNotFoundException {
        // Initial job configuration: hand the hot list to tasks via the config.
        Configuration configuration = new Configuration();
        configuration.setStrings("array.data", hotLists);
        Job job = Job.getInstance(configuration);
        job.setJarByClass(WriteHDFSDriver.class);
        // ArrayInputFormat generates input records from "array.data" rather
        // than reading input files from HDFS.
        job.setInputFormatClass(ArrayInputFormat.class);
        // Job name and mapper class.
        job.setJobName("myjob");
        job.setMapperClass(WriteHDFSMap.class);
        // Map-only job: zero reducers, so mapper output is written directly.
        job.setNumReduceTasks(0);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(HotSection.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(HotSection.class);
        // Unique output directory per run to avoid "output path already exists" failures.
        FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:9000/123/" + LocalDate.now().toString().replace("-", "") + "/" + System.currentTimeMillis()));
        // Bug fix: the original called System.exit(...) here, which terminates
        // the whole JVM — fatal inside a controller running in a shared server
        // process. Report failure through the already-declared IOException instead.
        if (!job.waitForCompletion(true)) {
            LOG.error("HDFS write job '" + job.getJobName() + "' failed");
            throw new IOException("Hadoop job '" + job.getJobName() + "' did not complete successfully");
        }
        LOG.info("HDFS write job '" + job.getJobName() + "' completed successfully");
    }
}
