/*******************************************************************************
 * Package: com.hadoop.flow
 * Type:    FlowMapper
 * Date:    2024-05-14 22:25
 *
 * Copyright (c) 2024 LTD All Rights Reserved.
 *
 * You may not use this file except in compliance with the License.
 *******************************************************************************/
package com.hadoop.flow;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Map task for the flow-summary job: parses one pipe-delimited log line and
 * emits (phone number, traffic record) pairs for the reducer to aggregate.
 * <p>
 * Programming model: https://img-blog.csdnimg.cn/2020070616031612.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L2h4eGp4dw==,size_16,color_FFFFFF,t_70
 * <ul>
 *   <li>KEYIN   {@link LongWritable} — byte offset of the line in the input split</li>
 *   <li>VALUEIN {@link Text} — one raw log line, fields separated by {@code '|'}</li>
 *   <li>KEYOUT  {@link Text} — the phone number (2nd field of the line)</li>
 *   <li>VALUEOUT {@link FlowWritable} — upstream/downstream traffic pair</li>
 * </ul>
 *
 * @author Songxianyang
 * @date 2024-05-14 22:25
 */
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowWritable> {

    /**
     * Extracts the phone number and the upstream/downstream traffic counters
     * from a single input line and writes them to the job context.
     *
     * @param key     byte offset of the line within the split (unused in output)
     * @param value   one log line whose fields are separated by {@code '|'}
     * @param context Hadoop task context receiving the (phone, flow) pair
     * @throws IOException          if the framework write fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // '|' is a regex alternation operator, so it must be escaped for split().
        String[] fields = value.toString().split("\\|");
        // Phone number is the second field.
        // NOTE(review): assumes every line has >= 3 fields after the phone — confirm input format.
        String phoneId = fields[1];
        // Upstream and downstream traffic are the 3rd- and 2nd-from-last fields.
        // parseLong avoids the needless Long boxing of Long.valueOf.
        long up = Long.parseLong(fields[fields.length - 3]);
        long down = Long.parseLong(fields[fields.length - 2]);
        // Build the record once (the original allocated an identical copy just
        // to print it) and drop the per-record System.out debugging, which
        // floods task logs and slows the job.
        context.write(new Text(phoneId), new FlowWritable(up, down));
    }
}
