package com.mapreduce.wordcount;

// Map-phase logic for the word-count job.


import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Generic parameters: k1 = input key (byte offset of the line), v1 = input value (line of text),
// k2 = output key (word), v2 = output value (count of 1).
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused across map() calls to avoid allocating a fresh Text/IntWritable
    // per record, which would put heavy pressure on the GC for large inputs.
    private final Text k2 = new Text();
    // Each occurrence counts as exactly 1, so the value never changes — set it once.
    private final IntWritable v2 = new IntWritable(1);

    /**
     * Called once per input line. Splits the line into words and emits a
     * (word, 1) pair for every non-empty word.
     *
     * @param k1      byte offset of the line within the input split
     * @param v1      the line of text, e.g. "Hello World"
     * @param context MapReduce context used to emit the (k2, v2) pairs
     * @throws IOException          if the underlying write fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable k1, Text v1, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // Convert the Hadoop Text value to a plain Java String.
        String line = v1.toString();
        // Split on runs of whitespace ("\\s+") rather than a single space:
        // split(" ") emits empty-string "words" for consecutive spaces and
        // does not handle tabs at all, which would corrupt the counts.
        for (String word : line.split("\\s+")) {
            // A line starting with whitespace (or a blank line) yields an
            // empty leading token — skip it instead of counting "".
            if (word.isEmpty()) {
                continue;
            }
            k2.set(word);
            context.write(k2, v2);
        }
    }
}
