package org.example.pro1_weather;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.example.utils.MpUtil;


/**
 * MapReduce job that sums per-year numeric values (e.g. rainfall) read from a
 * comma-separated input file of "year,value" records. The same reducer class
 * is reused as the combiner because summation is associative and commutative.
 * (The class keeps the name WordCount for historical reasons.)
 *
 * @Version 1.0.0
 * @Date 2023/12/6 22:06
 * @Author liangfengyuan1024@gmail.com
 */
public class WordCount {
    //                                                 输入的类型                       输出的类型
    //                                                 字节偏移量            一行文本     年份         数字
    public static class WordCountMapper extends Mapper<LongWritable,       Text,       Text, DoubleWritable> {

        private final static DoubleWritable yearValue = new DoubleWritable(0);
        private final Text year = new Text();

        /**
         * @param key     字节偏移量
         * @param value   一行文本 为什么是Text类型，要能序列化和排序
         * @param context hadoop的容器，可以取出运行时的环境变量
         */
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            System.out.println("key:" + key + ",value:" + value);
            // 这里需要按照逗号进行分割
            String[] split = value.toString().split(",");
            year.set(split[0]);
            yearValue.set(Double.parseDouble(split[1]));
            context.write(year, yearValue);
        }
    }

    /**
     * 因为 combiner 和 reducer 都是对相同的键的数据进行规约，所以用一个类实现就可以了
     */
    public static class DoubleSumReducer extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {
        private final DoubleWritable result = new DoubleWritable();
        /**
         * 做规约时返回的格式为 <word,{1,2,1}>
         *
         * @param key     单词
         * @param values  返回的结果，为列表结构，存放每一个结点计算的结果
         * @param context 上下文环境
         */
        public void reduce(Text key, Iterable<DoubleWritable> values,Context context) throws IOException, InterruptedException {
            System.out.println("reduce任务:  它的键 :" + key + ", 它的值:" + values.toString());
            double sum = 0;
            for (DoubleWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        /* yarn-site.xml 中的配置 */
        Job job = Job.getInstance(conf, "word count");
        /* Job -> n个task -> container -> taskset */
        job.setJarByClass(WordCount.class);
        /* mapper操作 */
        job.setMapperClass(WordCountMapper.class);
        /* combiner操作，合并一个结点中的数据 */
        job.setCombinerClass(DoubleSumReducer.class);
        /* reduce操作，合并不同结点中的数据 */
        job.setReducerClass(DoubleSumReducer.class);
        /* 设置输出的类型 */
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        /* 设置输入、输出目录，输出目录不能存在 */
        /* 设置输入输出的目录 */
        Path inputpath = new Path("E:\\workspace\\idea\\other\\hadoop_demo\\data\\in\\rain3.txt");
        Path outpath = new Path("E:\\workspace\\idea\\other\\hadoop_demo\\data\\out");
        /* 设置需要计算的文件 */
        FileInputFormat.addInputPath(job, inputpath);
        /* 删除多余的目录 */
        MpUtil.delOutPut(conf, outpath);
        /* 设置输出目录 */
        FileOutputFormat.setOutputPath(job, outpath);
        /* 0表示正常退出，1表示错误退出 */
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

