package com.neuedu.hadoop.wordcount;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

/*
 The four generic type parameters:
 KEYIN   : type of K1 (input key  — byte offset of the line)
 VALUEIN : type of V1 (input value — the text of one line)
 KEYOUT  : type of K2 (output key  — a single word)
 VALUEOUT: type of V2 (output value — the count 1)
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text,
        LongWritable> {

    // Reused across map() calls to avoid allocating a new Text/LongWritable
    // per output record (standard Hadoop object-reuse idiom).
    private final Text outKey = new Text();
    private final LongWritable outValue = new LongWritable(1);

    /**
     * Converts one input record (K1, V1) into zero or more (K2, V2) pairs.
     *
     * <p>Example transformation:
     * <pre>
     * K1  V1
     * 0   hello,world,hadoop
     * 15  hdfs,hive,hello
     * ---------------------------
     * K2      V2
     * hello   1
     * world   1
     * hadoop  1
     * hdfs    1
     * hive    1
     * hello   1
     * </pre>
     *
     * @param key     K1 — the byte offset of the line within the input split
     * @param value   V1 — the text of one input line, words separated by commas
     * @param context MapReduce context used to emit (word, 1) pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value,
                       Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        for (String token : value.toString().split(",")) {
            // Robustness fix: consecutive commas ("a,,b") or padded words
            // (" hello ") would otherwise be emitted as empty/whitespace keys.
            String word = token.trim();
            if (word.isEmpty()) {
                continue;
            }
            outKey.set(word);
            context.write(outKey, outValue);
        }
    }
}
