import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author yitta
 * @date 2021/11/14 22:42
 */



    /**
     * Map phase of a word-count job.
     *
     * <p>Input: one line of text per call, keyed by its byte offset in the file.
     * The line is split on tab characters; each non-blank token is emitted as
     * {@code (word, 1)} for the reducer to sum.
     *
     * <p>Input key/value: {@link LongWritable} offset, {@link Text} line.
     * Output key/value: {@link Text} word, {@link IntWritable} count (always 1).
     */
    public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Reused output key/value objects — Hadoop convention: allocate once,
        // mutate per record, to avoid churning objects on every call.
        private final Text k = new Text();
        private final IntWritable v = new IntWritable(1);

        /**
         * Splits the incoming line on tabs and writes each non-blank word
         * with a count of 1.
         *
         * @param key     byte offset of the line within the input split (unused)
         * @param value   the line of text to tokenize
         * @param context Hadoop context used to emit (word, 1) pairs
         * @throws IOException          if the framework fails to write output
         * @throws InterruptedException if the task is interrupted while writing
         */
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // 1. Get the current line, e.g. "dear\tbear\triver".
            String line = value.toString();

            // 2. Split on tab — the input records are tab-delimited.
            //    (Previous comment claimed comma-separated CSV; the code has
            //    always split on '\t', so the comment was corrected.)
            String[] words = line.split("\t");

            // 3. Emit (word, 1) for each token. Skip blank tokens so that
            //    consecutive tabs or stray whitespace are not counted as words.
            for (String word : words) {
                if (word.isEmpty()) {
                    continue;
                }
                k.set(word);           // k : "dear"
                context.write(k, v);   // emits ("dear", 1)
            }
        }
    }
