package org.example.mpareduce.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MineWordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Writables are reused across map() calls instead of allocated per token —
    // the standard Hadoop idiom to avoid churning the GC in the hot map loop.
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    /**
     * Tokenizes one input line and emits {@code (token, 1)} pairs for the reducer.
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   one line of input text
     * @param context MapReduce context used to emit the (k2, v2) output pairs
     * @throws IOException          if writing to the context fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split on runs of whitespace and/or periods.
        String[] tokens = value.toString().split("[\\s.]+");
        for (String token : tokens) {
            // split() produces a leading empty string when the line starts with
            // a separator; skip it so empty words are never counted.
            if (token.isEmpty()) {
                continue;
            }
            word.set(token);
            context.write(word, ONE);
        }
    }
}
