package cn.xiao.mr;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Word-count mapper: tokenizes each input line on single spaces and emits a
 * (word, 1) pair for every token.
 *
 * <p>Generic parameters {@code Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>}:
 * <ul>
 *   <li>KEYIN   = {@link LongWritable}: byte offset of the line in the split</li>
 *   <li>VALUEIN = {@link Text}: the line of input text</li>
 *   <li>KEYOUT  = {@link Text}: a single word</li>
 *   <li>VALUEOUT= {@link LongWritable}: the count contribution (always 1)</li>
 * </ul>
 */
public class WCMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    // Reused across map() invocations to avoid allocating two fresh objects
    // per emitted pair (standard Hadoop idiom; the framework serializes the
    // key/value at write() time, so instance reuse is safe).
    private final Text word = new Text();
    private static final LongWritable ONE = new LongWritable(1);

    /**
     * Splits the incoming line on single spaces and writes (word, 1) for
     * each resulting token.
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   one line of input text
     * @param context Hadoop context used to emit intermediate pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        // NOTE: split(" ") preserves empty tokens produced by consecutive
        // spaces — this matches the original behavior; switch to "\\s+" if
        // collapsing whitespace is ever desired.
        for (String w : line.split(" ")) {
            word.set(w);
            context.write(word, ONE);
        }
    }
}
