package com.haisen.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * WordCount mapper: splits each input line into whitespace-delimited tokens
 * and emits a ({@code token}, 1) pair for every token found.
 *
 * <p>Hadoop wraps primitive values in {@code Writable} types so they can be
 * serialized and shipped between cluster nodes; key types also serve as the
 * comparators driving the shuffle/sort phase.
 */
public class MyHDMapper extends Mapper<Object, Text, Text, IntWritable> {

    // Reused across map() invocations to avoid allocating one object per token.
    private static final IntWritable ONE = new IntWritable(1);
    private final Text token = new Text();

    /**
     * Emits (word, 1) for each whitespace-separated token of the input line.
     *
     * @param key     position of the line in the input (offset per the default
     *                InputFormat — NOTE(review): exact meaning depends on the
     *                job's configured InputFormat)
     * @param value   the text of the current input line
     * @param context collector for the intermediate key/value pairs
     * @throws IOException          if writing to the context fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        // e.g. input line: "hello sxt 102" -> ("hello",1) ("sxt",1) ("102",1)
        for (StringTokenizer tokens = new StringTokenizer(value.toString());
                tokens.hasMoreTokens(); ) {
            token.set(tokens.nextToken());
            context.write(token, ONE);
        }
    }
}
