package com.example.mapreducr.writable;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Created with IntelliJ IDEA.
 * ClassName: WordCountMapper
 * Package: com.example.mapreducr.writable
 * Description:
 * User: fzykd
 *
 * @Author: LQH
 * Date: 2023-07-15
 * Time: 20:20
 */

/**
 * Mapper conventions:
 * - A custom Mapper must extend Hadoop's {@code Mapper} class.
 * - Both the Mapper's input and output are (key, value) pairs.
 * - The business logic lives in map(), which the MapTask process
 *   invokes once per input line.
 * - Override map() and put the per-record logic inside it.
 */
/**
 * Mapper for the phone-traffic (flow) statistics job.
 *
 * <p>Input:  (byte offset, one tab-separated log line), e.g.
 * {@code 1\t13736230513\t192.196.100.1\twww.atguigu.com\t2481\t24681\t200}
 * <p>Output: (phone number, FlowBean holding up/down/sum traffic).
 *
 * <p>NOTE(review): the class name says "WordCount" but the logic is flow
 * aggregation — consider renaming to {@code FlowMapper}. The name is kept
 * here so existing driver code keeps compiling.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    // Output key/value are reused across map() calls to avoid
    // allocating a new object for every input record.
    private Text outK = new Text();
    private FlowBean outV = new FlowBean();

    /**
     * Invoked once per input line by the MapTask.
     *
     * @param key     byte offset of the line within the input split
     * @param value   one tab-separated log line
     * @param context used to emit (phone, FlowBean) pairs
     * @throws IOException          if the write to context fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Convert the hadoop Text to a String for easier field handling.
        String line = value.toString();

        // Fields are tab-separated:
        //   id  phone  ip  [host]  up  down  status
        // The host column is optional, so up/down are taken from the end.
        String[] fields = line.split("\t");

        // Guard: skip malformed/short lines instead of letting an
        // ArrayIndexOutOfBoundsException kill the whole MapTask.
        if (fields.length < 5) {
            return;
        }

        String phone = fields[1];
        String up = fields[fields.length - 3];
        String down = fields[fields.length - 2];

        // 2. Populate the reusable output key/value.
        outK.set(phone);
        try {
            outV.setUp(Long.parseLong(up));
            outV.setDown(Long.parseLong(down));
        } catch (NumberFormatException e) {
            // Skip records whose traffic columns are not numeric
            // rather than failing the task on one dirty row.
            return;
        }
        outV.setSum(); // sum = up + down, computed inside FlowBean

        // 3. Emit (phone, FlowBean).
        context.write(outK, outV);
    }
}
