package hadoop.WordCount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * WordCount mapper: reads the input one line at a time, splits each line into
 * words, and emits a <word, 1> pair for every word found.
 */
public class mapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final Text word = new Text();
    private static final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Take one line of input and convert it to a String
        String line = value.toString();
        // Split the line on whitespace; \\s+ matches one or more whitespace characters
        String[] words = line.split("\\s+");
        // Iterate over the words and emit each one with a count of 1, e.g. <java,1>
        for (String w : words) {
            // Skip the empty token produced when a line starts with whitespace
            if (w.isEmpty()) {
                continue;
            }
            word.set(w);
            context.write(word, one);
        }
    }
}
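
/*
 * Minimal driver sketch showing how this mapper might be wired into a job.
 * Kept as a comment so this file still compiles on its own. The reducer class
 * (called "reducer" here) and the use of args[0]/args[1] as input/output paths
 * are assumptions for illustration, not part of this file; adjust the names to
 * match the rest of the repo.
 *
 *   Configuration conf = new Configuration();
 *   Job job = Job.getInstance(conf, "word count");
 *   job.setJarByClass(mapper.class);
 *   job.setMapperClass(mapper.class);
 *   job.setCombinerClass(reducer.class);   // hypothetical reducer class
 *   job.setReducerClass(reducer.class);    // hypothetical reducer class
 *   job.setOutputKeyClass(Text.class);
 *   job.setOutputValueClass(IntWritable.class);
 *   FileInputFormat.addInputPath(job, new Path(args[0]));
 *   FileOutputFormat.setOutputPath(job, new Path(args[1]));
 *   System.exit(job.waitForCompletion(true) ? 0 : 1);
 */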
