package DivorceSum4;


import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Requirement: aggregate divorce-registration figures from the last twenty years of records.
 * Design: a custom writable, DivorceCountBean, wraps the (province, city) pair and must
 *         implement Hadoop's serialization (Writable) mechanism. The mapper emits
 *         (DivorceCountBean(province, city), divorce figure) pairs for the reducer to sum.
 */
public class DivorceSumMapper extends Mapper<LongWritable, Text, DivorceCountBean, FloatWritable> {

    /** Reusable output key wrapping (province, city); reused across calls per Hadoop convention to avoid per-record allocation. */
    private final DivorceCountBean outKey = new DivorceCountBean();

    /** Reusable output value holding one record's divorce figure. */
    private final FloatWritable outValue = new FloatWritable();

    /**
     * Parses one CSV line and emits ((province, city), divorce figure).
     * Malformed lines (too few columns, non-numeric figure) are skipped silently
     * rather than failing the whole task.
     *
     * @param key     byte offset of the line within the input split
     * @param value   one raw CSV line
     * @param context Hadoop context used to emit the (key, value) pair
     * @throws IOException          propagated from {@code context.write}
     * @throws InterruptedException propagated from {@code context.write}
     */
    @Override
    protected void map(LongWritable key, Text value,
                       Mapper<LongWritable, Text, DivorceCountBean, FloatWritable>.Context context)
            throws IOException, InterruptedException {
        // Skip the CSV header line, identified as the line at byte offset 0.
        // NOTE(review): this only catches a header at the start of the FIRST split;
        // assumes a single header line at the top of the file — confirm input layout.
        if (key.get() == 0) {
            return;
        }

        // Example line: 北京市,东城区,2022年,0.53,0.52,0.62,0.47,0.01,0.25,148,
        // Columns: province, city, year, six ratio columns, registration count, trailing comma.
        String[] fields = value.toString().split(",");
        if (fields.length < 9) {
            // Short or blank line: skip instead of throwing ArrayIndexOutOfBoundsException.
            return;
        }

        String province = fields[0];
        String city = fields[1];
        outKey.set(province, city);

        // Column 8 is the divorce figure per the original code.
        // NOTE(review): in the sample line above, index 8 holds 0.25 (a rate) and the
        // count 148 sits at index 9 — verify the intended column against the real data.
        final float divorceNum;
        try {
            divorceNum = Float.parseFloat(fields[8].trim());
        } catch (NumberFormatException e) {
            // Dirty or non-numeric cell: drop the record rather than failing the task.
            return;
        }
        outValue.set(divorceNum);

        context.write(outKey, outValue);
    }
}
