package com.example.mapreduce.combiner;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Created with IntelliJ IDEA.
 * ClassName: WordCountMapper
 * Package: com.example.mapreduce.wordcount
 * Description:
 * User: fzykd
 *
 * @Author: LQH
 * Date: 2023-07-15
 * Time: 9:50
 */

/**
 * All four type parameters are Hadoop Writable types:
 * KEYIN   — map-stage input key: the line's byte offset, {@code LongWritable}
 * VALUEIN — map-stage input value: the line of text, {@code Text}
 * KEYOUT  — map-stage output key: a single word, {@code Text}
 * VALUEOUT — map-stage output value: the count 1, {@code IntWritable}
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // map() runs once per input line; reuse these Writables across calls
    // instead of allocating a new object per record.
    private final Text outK = new Text();

    // The map stage performs no aggregation: every word is emitted with a
    // fixed count of 1, e.g. (hello,1)(abc,1); the combiner/reducer sums them.
    private final IntWritable outV = new IntWritable(1);

    /**
     * Tokenizes one input line and emits a {@code (word, 1)} pair per word.
     *
     * @param key     byte offset of this line within the input split (unused)
     * @param value   the line of text to tokenize
     * @param context Hadoop job context used to write the output pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Convert the Text to a String for the richer string API.
        String line = value.toString();

        // 2. Split on runs of whitespace rather than a single literal space,
        //    so tabs and repeated spaces do not yield empty tokens.
        String[] words = line.split("\\s+");

        // 3. Emit (word, 1) for each real token.
        for (String word : words) {
            // split() still returns one leading empty token when the line
            // starts with whitespace (or is all whitespace); skip it so we
            // never count the empty string as a word.
            if (word.isEmpty()) {
                continue;
            }
            outK.set(word); // reuse the Text holder for this word
            context.write(outK, outV);
        }
    }
}
