package cn.lyjuan.first.hadoop.demo.ch04;

import cn.lyjuan.base.util.RandomUtils;
import cn.lyjuan.first.hadoop.demo.enums.ChNameEnum;
import cn.lyjuan.first.hadoop.util.FileUtil;
import cn.lyjuan.first.hadoop.util.HDFSUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

/**
 * Computes the average score per person with MapReduce.
 */
@Slf4j
public class Ch04S05Average {

    /**
     * Parsing task:<br/>
     * 1. Parses raw input lines into (name, score) pairs.<br/>
     * <p>
     * The Mapper's InputFormat splits the raw data set into InputSplits (default: TextInputFormat),
     * then a RecordReader turns each InputSplit into &lt;key,value&gt; records (default: LineRecordReader).
     * The map output is partitioned and dispatched to the Reducers.
     * After the Reducer finishes its {@code reduce} work, an OutputFormat writes the results.
     * </p>
     * <p>
     * Note: the Mapper output key/value types must match the Reducer input types.
     * </p>
     */
    public static class AvgMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        // Reused output Writables. Instance fields (not static): static mutable
        // Writables would be shared by every mapper instance in the same JVM.
        // The original static fields were shadowed by locals and never used.
        private final Text outName = new Text();
        private final IntWritable outScore = new IntWritable();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // With TextInputFormat each value is a single line, but split on '\n'
            // defensively in case a multi-line value ever arrives.
            StringTokenizer lines = new StringTokenizer(value.toString(), "\n");
            while (lines.hasMoreTokens()) {
                // Each line is "<name> <score>", separated by default whitespace delimiters.
                StringTokenizer fields = new StringTokenizer(lines.nextToken());
                if (fields.countTokens() < 2) {
                    continue; // skip blank/malformed lines instead of failing the task
                }
                String name = fields.nextToken();
                String score = fields.nextToken();
                // Reuse the Writable instances instead of allocating per record.
                outName.set(name);
                outScore.set(Integer.parseInt(score));
                context.write(outName, outScore);
                log.info("{} ==> {}, {}", value, name, score);
            }
        }
    }

    /**
     * Merge/aggregate task: sums all scores grouped under one name and emits the
     * truncated integer average.<br/>
     * Hadoop sorts by the Mapper output key; the default order is ascending.
     */
    public static class AvgReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        // Reused output Writable. Instance field (not static): a static mutable
        // Writable would be shared by every reducer instance in the same JVM.
        private final IntWritable avgScore = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            long sum = 0; // long guards against int overflow when many scores accumulate
            int count = 0;
            for (IntWritable v : values) {
                sum += v.get();
                count++;
            }
            if (count == 0) {
                return; // framework never calls reduce with an empty group, but be safe
            }
            // Integer division: the average is truncated, matching the IntWritable
            // output value type configured on the job.
            avgScore.set((int) (sum / count));
            context.write(key, avgScore);
        }
    }

    /** HDFS output directory for the job results ("result" under this demo's chapter directory). */
    public static final Path OUT_PATH = FileUtil.remoteURIPath(ChNameEnum.CH04, Ch04S05Average.class, "result");

    /** HDFS paths of the generated input files; populated by {@link #loadInputFiles()}. */
    public static final List<Path> inFiles = new ArrayList<>();

    /**
     * Generates the HDFS source data files: a random number of files, each holding
     * one "&lt;name&gt; &lt;score&gt;" line per generated full name.
     * <p>
     * NOTE(review): assumes {@code RandomUtils.randomInt(1)} yields at least one
     * file — if it can return 0 the job below gets no input paths; verify.
     */
    public static void loadInputFiles() {
        int fileCount = RandomUtils.randomInt(1);
        Path remoteDir = new Path(FileUtil.remotePath(ChNameEnum.CH04, Ch04S05Average.class));
        // Wipe any data left over from a previous run.
        HDFSUtil.del(remoteDir);
        String[] firstNames = {"李", "张", "王", "刘", "孙", "陈", "丁", "钱"};
        String[] lastNames = {"麻", "全", "蛋", "路", "合", "德", "华"};
        for (int fileNo = 0; fileNo < fileCount; fileNo++) {
            Path remoteFile = FileUtil.remoteURIPath(ChNameEnum.CH04, Ch04S05Average.class, String.valueOf(fileNo));
            inFiles.add(remoteFile);
            StringBuilder content = new StringBuilder();
            for (String first : firstNames) {
                for (String last : lastNames) {
                    content.append("\n").append(first).append(last).append(" ").append(RandomUtils.randomInt(2));
                }
            }
            // Drop the newline prepended before the very first record.
            content.deleteCharAt(0);
            HDFSUtil.writeRemote(remoteFile, content.toString());
            System.out.println("write remote file: " + remoteFile.toString());
        }
    }

    /**
     * Entry point: generates the input data, runs the average MapReduce job and
     * prints the contents of the result files.
     *
     * @param args unused
     * @throws Exception if job setup or execution fails
     */
    public static void main(String[] args) throws Exception {
        // Generate the HDFS source files.
        loadInputFiles();

//        HDFSUtil.conf().set("mapred.job.tracker", "192.168.1.200:9001");

        // Job.getInstance replaces the deprecated new Job(conf, name) constructor;
        // the job is also named for what it actually computes (was "sort",
        // copy-paste residue from a sorting demo).
        Job job = Job.getInstance(HDFSUtil.conf(), "average");
        job.setUser(HDFSUtil.USER);
        job.setJarByClass(Ch04S05Average.class);
        job.setMapperClass(AvgMapper.class);
        job.setReducerClass(AvgReduce.class);

        // Mapper output types equal these, so no separate setMapOutput*Class is needed.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // TextInputFormat splits the input into InputSplits and provides the
        // line-oriented RecordReader implementation.
        job.setInputFormatClass(TextInputFormat.class);
        // TextOutputFormat provides the RecordWriter that writes the results.
        job.setOutputFormatClass(TextOutputFormat.class);

        for (Path in : inFiles) {
            FileInputFormat.addInputPath(job, in);
        }
        FileOutputFormat.setOutputPath(job, OUT_PATH);

        boolean isOk = job.waitForCompletion(true);
        System.out.println("result ==> " + isOk);
        if (!isOk) {
            return;
        }
        // On success the result files exist; on failure nothing is written.
        FileStatus[] results = HDFSUtil.ls(OUT_PATH);
        for (FileStatus f : results) {
            System.out.println("====== " + f.getPath().getName() + " ======");
            // NOTE(review): new String(byte[]) uses the platform charset; if
            // HDFSUtil writes UTF-8, pass StandardCharsets.UTF_8 here — verify
            // against HDFSUtil.writeRemote's encoding before changing.
            System.out.println(new String(HDFSUtil.readRemote(f.getPath())));
            System.out.println();
            System.out.println();
        }
    }
}
