package cn.jcet.mapreduce.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author 鲜磊
 * @Date 2020/12/1 23:29
 * 编写Mapper类
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reusable output objects: Hadoop serializes key/value on each
    // context.write(), so one instance can safely be reused across calls
    // instead of allocating a new object per emitted word.
    private final Text outKey = new Text();
    private final IntWritable one = new IntWritable(1);

    /**
     * Splits each input line into words and emits a (word, 1) pair per word.
     *
     * @param key     byte offset of this line within the input split (unused)
     * @param value   one line of input text
     * @param context Hadoop context through which (word, 1) pairs are emitted
     * @throws IOException          if emitting a pair fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Convert the Hadoop Text to a plain String so it can be split.
        String line = value.toString();

        // Split on runs of whitespace ("\\s+") instead of a single space:
        // split(" ") yields empty tokens for consecutive spaces or tabs,
        // which would otherwise be counted as words.
        String[] words = line.split("\\s+");

        for (String word : words) {
            // A line with leading whitespace still produces one empty
            // leading token from split(); skip it.
            if (word.isEmpty()) {
                continue;
            }
            outKey.set(word);
            context.write(outKey, one);
        }
    }
}
