package com.shujia.wyh.mr.kqzl2;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

/*
china_sites_20180304.csv-r-00000
    日期:20180304-监测点编号==:2647	26  ---> <"2647","#日期:20180304:26">
city.csv
    2647,豫源饭店,白银                    ---> <"2647","$白银豫源饭店">

    <"2647",["#日期:20180304:26","$白银豫源饭店"]>
    <"2647",["$白银豫源饭店","#日期:20180304:26"]>

 */
/**
 * Map-side tagging phase of a reduce-side join on monitoring-site id.
 *
 * <p>Emits records keyed by site id, with a one-character tag prefix on the value so the
 * reducer can tell the two sources apart:
 * <ul>
 *   <li>air-quality file ({@code china_sites_*.csv}): value tagged {@code "#"}, e.g.
 *       {@code <"2647", "#日期:20180304:26">}</li>
 *   <li>city file ({@code city.csv}): value tagged {@code "$"}, e.g.
 *       {@code <"2647", "$白银豫源饭店">}</li>
 * </ul>
 */
public class KQZL2Mapper extends Mapper<LongWritable, Text, Text, Text> {

    // Reusable output objects — standard Hadoop idiom to avoid one allocation per record.
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    /**
     * Tags each input line by its source file and emits {@code <siteId, taggedValue>}.
     * Malformed or blank lines are skipped silently rather than failing the task.
     *
     * @param key     byte offset of the line in the split (unused)
     * @param value   one raw input line
     * @param context used to look up the source file name and to emit output
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // The file name of the current split tells us which side of the join this line is.
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        String pathName = inputSplit.getPath().getName();
        String line = value.toString();

        if (pathName.startsWith("china")) {
            // Record shape: "日期:20180304-监测点编号==:2647\t26"
            //   column 0 -> colon-separated key part, column 1 -> daily average value
            String[] columns = line.split("\t");
            if (columns.length < 2) {
                return; // malformed or blank line — skip instead of throwing AIOOBE
            }
            String[] keyParts = columns[0].split(":");
            if (keyParts.length < 3) {
                return;
            }
            String id = keyParts[2];
            // keyParts[1] is "20180304-监测点编号=="; keep only the date before the '-'.
            String dateTime = keyParts[1].split("-")[0];
            outKey.set(id);
            outValue.set("#日期:" + dateTime + ":" + columns[1]);
            context.write(outKey, outValue);
        } else if (pathName.startsWith("city")) {
            // Record shape: "2647,豫源饭店,白银" -> id, site name, city name
            String[] fields = line.split(",");
            if (fields.length < 3) {
                return; // malformed or blank line — skip
            }
            outKey.set(fields[0]);
            // City first, then site name, matching the documented output "$白银豫源饭店".
            outValue.set("$" + fields[2] + fields[1]);
            context.write(outKey, outValue);
        }
    }
}
