package com.shujia.wyh.kqzldemo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

class PM25Mapper extends Mapper<LongWritable, Text, Text, Text> {
    // Reusable output objects — avoids two allocations per emitted record,
    // the standard Hadoop mapper pattern.
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    /**
     * Emits one ("日期:&lt;date&gt;-监测点:&lt;stationId&gt;", reading) pair per
     * monitoring-station column of every PM2.5 data row.
     *
     * Input line format: date,hour,type,1001A,1002A,1003A,...
     * Example:           20180410,0,PM2.5,94,109,91,90
     *
     * Header filtering: the header is the first line, so any record whose
     * byte-offset key is 0 is skipped; rows whose type column is not
     * "PM2.5" are skipped as well.
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // limit -1 keeps trailing empty columns: the original split(",")
        // silently dropped stations whose readings at the end of a row were
        // missing, losing those records instead of emitting "0" for them.
        String[] fields = value.toString().split(",", -1);
        if (fields.length > 4 && key.get() != 0 && "PM2.5".equals(fields[2])) {
            String date = fields[0];
            // Station ids start at 1001 and map 1:1 onto columns 3..n.
            for (int i = 3, station = 1001; i < fields.length; i++, station++) {
                // Normalize missing readings to "0"; the reducer treats "0"
                // as "no data" and excludes it from the average.
                String reading = fields[i].isEmpty() ? "0" : fields[i];
                outKey.set("日期:" + date + "-监测点:" + station);
                outValue.set(reading);
                context.write(outKey, outValue);
            }
        }
    }
}
// Mapper output records (key = "日期:<date>-监测点:<station>", value = one reading):
//<日期:20180101-监测点:1001,"52">
//<日期:20180101-监测点:1001,"66">
//<日期:20180101-监测点:1001,"0">
//...

// After the shuffle, the reducer receives all readings grouped by key:
//<日期:20180101-监测点:1001,["52","66","0","0",....]>
class PM25Reducer extends Reducer<Text, Text, Text, LongWritable> {
    // Reusable output value — avoids one allocation per key.
    private final LongWritable outValue = new LongWritable();

    /**
     * Emits the integer average of all non-zero readings for one
     * date/station key.
     *
     * "0" is the mapper's placeholder for a missing reading, so zeros are
     * excluded from both the sum and the count. When every reading for the
     * key is missing, 0 is emitted as the average.
     */
    @Override
    protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        long sum = 0L;
        int count = 0;
        for (Text value : values) {
            String reading = value.toString();
            if (!"0".equals(reading)) {
                sum += Long.parseLong(reading);
                count++;
            }
        }
        // count > 0 replaces the original redundant boolean flag, which
        // always agreed with count being non-zero.
        outValue.set(count > 0 ? sum / count : 0L);
        context.write(key, outValue);
    }
}


public class PM25Demo {
    /**
     * Job driver: configures and submits the daily PM2.5-average job.
     *
     * @param args args[0] = input file or directory (all files in a
     *             directory are read), args[1] = output directory
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException on args[0]/args[1].
        if (args.length < 2) {
            System.err.println("Usage: PM25Demo <input path> <output path>");
            System.exit(2);
        }

        // Load the Hadoop cluster configuration.
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setJobName("统计每天的PM2.5的平均值");

        // A single reducer so all averages land in one output file.
        job.setNumReduceTasks(1);
        job.setJarByClass(PM25Demo.class);

        job.setMapperClass(PM25Mapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(PM25Reducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // The input path may be a single file or a directory; a directory
        // causes every file inside it to be read.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Propagate job success/failure to the shell exit code; the
        // original ignored waitForCompletion's boolean return value, so a
        // failed job still exited 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
