package com.exmaple.mapreducer.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
// k1 map输入的key，类型是LongWritable
//v1 map输入的values 类型 Text
//k2 map输出 类型 Text
//v2 map输出 类型IntWritable
//      泛型                 k1       v1    k2       v2
// Mapper for the classic word-count job.
//
// Generic parameters:
//   k1 (map input key):    LongWritable — byte offset of the line within the input split
//   v1 (map input value):  Text         — the line content itself
//   k2 (map output key):   Text         — a single word
//   v2 (map output value): IntWritable  — the count 1 for each occurrence
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused across map() calls to avoid allocating a new object per record
    // (standard Hadoop object-reuse pattern).
    private final Text k2 = new Text();
    private final IntWritable v2 = new IntWritable(1);

    /**
     * Splits each input line into words and emits (word, 1) for every word.
     * <p>
     * Example: "a good beginning is half the battle" emits
     * ("a",1), ("good",1), ("beginning",1), ("is",1), ("half",1), ("the",1), ("battle",1).
     *
     * @param key     byte offset of the line in the input split (unused)
     * @param value   one line of input text
     * @param context Hadoop context used to emit (word, 1) pairs
     * @throws IOException          if the underlying write fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Convert the Text value to a Java String.
        String lineContent = value.toString();

        // 2. Split on runs of whitespace instead of a single space so that
        //    consecutive spaces or tabs do not produce empty-string "words".
        String[] words = lineContent.split("\\s+");

        // 3. Emit (word, 1) for each non-empty word. v2 is fixed at 1, so no
        //    per-iteration set(1) is needed.
        for (String word : words) {
            if (word.isEmpty()) {
                continue; // leading whitespace yields one empty leading token — skip it
            }
            k2.set(word);
            context.write(k2, v2);
        }
    }
}
