package com.jml.mapreduce.自定义序列化;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Mapper for the per-phone traffic-summing job.
 *
 * Input : (line offset : LongWritable, one tab-separated log line : Text)
 * Output: (phone number : Text, FlowBean carrying upstream/downstream flow)
 */
public class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    // Reusable output objects: Hadoop serializes them inside context.write(),
    // so one instance per mapper avoids a per-record allocation. Named outKey /
    // outValue so they do not shadow the map() parameters `key` and `value`.
    private final Text outKey = new Text();
    private final FlowBean outValue = new FlowBean();

    // Sample record (tab separated):
    // 7	13560436666	120.196.100.99	1116	954	200
    // id	phone number	network ip	upFlow	downFlow	HTTP status code
    /**
     * Parses one tab-separated log line and emits (phone number, FlowBean).
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   the raw log line
     * @param context task context used to emit the output pair
     * @throws IOException          if the framework fails to write the output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Get the whole line as a String.
        String line = value.toString();
        // 2. Split it into tab-separated fields.
        String[] fields = line.split("\t");
        // 3. Build the output pair.
        // The phone number (column 1) is the output key.
        String phoneNum = fields[1];
        // Up/down flow are addressed from the END of the record: the columns
        // between the phone number and the counters can vary per record, but
        // the last three are always (upFlow, downFlow, status code).
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        outKey.set(phoneNum);
        outValue.setUpFlow(upFlow);
        outValue.setDownFlow(downFlow);
        // 4. Emit the pair.
        context.write(outKey, outValue);
    }
}
