package yz.mr.CombineFile;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class CombineFIleMap extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused output writables. Allocating new Text/IntWritable objects for
    // every token creates heavy GC pressure in mappers; context.write()
    // serializes the values immediately, so instance reuse is safe and is
    // the standard Hadoop idiom (see the stock WordCount example).
    private final Text outKey = new Text();
    private final IntWritable outValue = new IntWritable(1); // count is always 1

    /**
     * Splits one line of input on single spaces and emits a {@code (word, 1)}
     * pair for every token, to be aggregated by the reduce phase.
     *
     * @param key     byte offset of this line within the input split
     * @param value   one line of text from the input file
     * @param context MapReduce context connecting the map and reduce phases;
     *                emitted pairs are spilled to disk for the reducers to pull
     * @throws IOException          if emitting a pair fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // NOTE: split(" ") is kept as-is to preserve behavior — consecutive
        // spaces produce empty tokens, which are emitted as empty-string keys.
        String[] words = value.toString().split(" ");

        for (String word : words) {
            // Refill the reused writables instead of constructing new ones.
            outKey.set(word);
            context.write(outKey, outValue);
        }
    }
}
