package com.tledu.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Mapper for the classic word-count job: tokenizes each input line on
 * whitespace and emits one {@code (word, 1)} pair per token.
 *
 * <p>Input key type is {@code Object} (the byte offset supplied by
 * {@code TextInputFormat} is ignored); input value is one line of text.
 * Output is {@code (Text word, IntWritable 1)} for the reducer to sum.
 */
public class WordCountMapper extends Mapper<Object, Text, Text, IntWritable> {

    // Reusable output objects, per the standard Hadoop WordCount idiom:
    // the framework serializes the key/value inside context.write(), so a
    // single instance can be safely reset and re-emitted for every token.
    // This avoids allocating two objects per word on large inputs.
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    /**
     * Splits the line held in {@code value} into whitespace-delimited
     * tokens and writes {@code (token, 1)} for each one.
     *
     * @param key     input key (byte offset); unused
     * @param value   one line of input text
     * @param context Hadoop context used to emit output pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        // Default StringTokenizer delimiters split on whitespace.
        StringTokenizer tokens = new StringTokenizer(value.toString());
        while (tokens.hasMoreTokens()) {
            // Reuse the same Text holder; Hadoop copies it on write.
            word.set(tokens.nextToken());
            context.write(word, ONE);
        }
    }

    /**
     * Standalone scratch demo of {@link StringTokenizer} with a custom
     * delimiter; not part of the MapReduce job flow.
     */
    public static void main(String[] args) {
        String str = "cd,ww,yy,mm,job021";
        StringTokenizer stringTokenizer = new StringTokenizer(str, ",");
        while (stringTokenizer.hasMoreTokens()) {
            System.out.println(stringTokenizer.nextToken());
        }
    }
}
