package com.mapreduce.wordcount;


import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
// Map phase
// k1: map input key, type LongWritable (byte offset of the line in the file)
// v1: map input value, type Text (the line's content)
// k2: map output key, type Text (a single word)
// v2: map output value, type IntWritable (the count, always 1)
//      generics:              k1       v1    k2       v2
public class WordCountMapper extends Mapper <LongWritable, Text,Text, IntWritable> {
    // Reusable output key for the map phase (the current word).
    // Hadoop convention: reuse Writable objects across calls to avoid
    // allocating one object per token.
    Text k2 = new Text();
    // Reusable output value for the map phase. Every word is counted
    // exactly once per occurrence, so the value is fixed at 1 up front
    // instead of being re-set inside the loop on every token.
    IntWritable v2 = new IntWritable(1);

    /**
     * Called once per input line (the number of map() invocations is
     * determined by the number of lines in the split).
     *
     * @param key     byte offset of the line within the input file (unused)
     * @param value   the text of the line
     * @param context Hadoop context used to emit (word, 1) pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context) throws IOException, InterruptedException {

        // 1. Convert v1 from Text to String, e.g. "Hello world"
        String line = value.toString();
        // 2. Tokenize on runs of whitespace ("\\s+") rather than a single
        //    space, so tabs and repeated spaces don't yield empty tokens,
        //    e.g. {"Hello", "world"}
        String[] words = line.split("\\s+");

        // Emit (word, 1) for each token.
        for (String word : words) {
            // split() can still return one leading empty token when the
            // line starts with whitespace (or is blank) — skip it so we
            // never count the empty string as a word.
            if (word.isEmpty()) {
                continue;
            }
            // Set k2 to the current word.
            k2.set(word);
            // 3. Emit k2, v2 via context.write(); v2 is permanently 1.
            context.write(k2, v2);
        }

    }
}
