package com.shujia.wyh.kqzldemo;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

/*
    Joins the per-station PM2.5 averages produced by requirement #1 with the
    city table, keyed on the monitoring-station id.
 */
class PM25CityMapper extends Mapper<LongWritable, Text, Text, Text> {

    // Reusable output objects: allocating fresh Text instances for every
    // record creates needless GC churn; the standard Hadoop idiom is to keep
    // one instance per mapper and call set() on it.
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    /**
     * Tags each record with a marker prefix and emits it keyed by the
     * monitoring-station id, so the reducer can join the two data sources:
     * <pre>
     *   "日期:20180718-监测点:2604\t22"  ---&gt;  &lt;"2604", "$20180718-22"&gt;
     *   "2604,市气象局,临沧"             ---&gt;  &lt;"2604", "#临沧-市气象局"&gt;
     * </pre>
     * The input file name decides which branch parses the record.
     *
     * @param key     byte offset of the line within the split (unused)
     * @param value   one raw input line
     * @param context Hadoop task context, used to emit and to identify the split
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // Determine which input file this record came from.
        FileSplit fileSplit = (FileSplit) context.getInputSplit();
        String fileName = fileSplit.getPath().getName();
        if (fileName.startsWith("city")) {
            // Record comes from city.csv (comma-separated): id,bureau,city
            String[] fields = value.toString().split(",");
            if (!"id".equals(fields[0])) {
                // Skip the header row; emit <id, "#city-bureau">
                outKey.set(fields[0]);
                outValue.set("#" + fields[1] + "-" + fields[2]);
                context.write(outKey, outValue);
            }
        } else if (fileName.startsWith("part")) {
            // Record comes from the previous job's output (part-r-00000),
            // shaped "日期:<date>-监测点:<id>\t<avg>"; emit <id, "$date-avg">
            String[] halves = value.toString().split("-");
            String date = halves[0].split(":")[1];
            String[] idAndAvg = halves[1].split("\t");
            String id = idAndAvg[0].split(":")[1];
            String pm25Avg = idAndAvg[1];
            outKey.set(id);
            outValue.set("$" + date + "-" + pm25Avg);
            context.write(outKey, outValue);
        }
    }
}