package Compress.WordCount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * WordCount mapper: tokenizes each input line on whitespace and emits
 * {@code (word, 1)} for every token.
 *
 * <p>Type parameters of {@link Mapper}:
 * <ul>
 *   <li>KeyIn   — {@link LongWritable}: byte offset of the line in the input split
 *       (e.g. 0, 52, 75, 81, ...)</li>
 *   <li>ValueIn — {@link Text}: the content of one line
 *       (e.g. "hadoop hdfs MapReduce Yarn zookeeper ...")</li>
 *   <li>KeyOut  — {@link Text}: a single word</li>
 *   <li>ValueOut — {@link IntWritable}: the constant count 1</li>
 * </ul>
 */
public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused across map() calls — the standard Hadoop pattern. The framework
    // serializes the key/value at write() time, so reusing a single mutable
    // instance is safe and avoids allocating two objects per word.
    private final Text outWord = new Text();

    // The emitted count never changes, so a single shared instance suffices.
    private static final IntWritable ONE = new IntWritable(1);

    /**
     * Emits {@code (word, 1)} for every whitespace-separated token in the line.
     *
     * @param key     byte offset of this line within the input split (unused)
     * @param value   the line of text to tokenize
     * @param context Hadoop context used to emit output pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split on runs of whitespace. The previous split(" ") produced empty
        // tokens for consecutive spaces, which would have been counted as
        // ("", 1) records downstream.
        String[] words = value.toString().split("\\s+");

        for (String word : words) {
            // A line with leading whitespace still yields one empty leading
            // token from split(); skip it rather than emit an empty key.
            if (word.isEmpty()) {
                continue;
            }
            outWord.set(word);
            context.write(outWord, ONE); // e.g. ("hadoop", 1)
        }
    }
}
