package base.test1.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Input key   (k1): LongWritable — byte offset of the line in the input split
// Input value (v1): Text         — the contents of one line
// Output key  (k2): Text         — a single word
// Output value(v2): IntWritable  — the count 1 for each occurrence
/**
 * WordCount mapper: tokenizes each input line into words and emits
 * {@code <word, 1>} pairs for the reducer to aggregate.
 *
 * <p>Input:  {@code <LongWritable, Text>} — byte offset of the line, line contents.
 * Output: {@code <Text, IntWritable>} — word, constant count of 1.
 */
public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Reused across map() calls to avoid allocating a new object per token.
    private final Text word = new Text();
    private final IntWritable one = new IntWritable(1);

    /**
     * Splits one line of input into words and writes {@code <word, 1>} for each.
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   the line of text to tokenize
     * @param context Hadoop context used to emit output pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split on runs of whitespace: split(" ") would produce empty tokens
        // for consecutive spaces or tabs, which would be counted as "words".
        String[] tokens = value.toString().split("\\s+");

        for (String token : tokens) {
            // A line starting with whitespace yields one leading empty token.
            if (token.isEmpty()) {
                continue;
            }
            this.word.set(token);
            // Emit <k2, v2>: the word with a count of 1.
            context.write(this.word, one);
        }
    }
}
