package wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Map stage: emits {@code (category, 1)} for each tab-separated input line,
 * where the category is the 7th field (index 6) of the line, e.g.
 * {@code "Social Networks"}. The reducer sums the 1s per category.
 *
 * <p>Generic parameters of {@code Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>}:
 * <ul>
 *   <li>KEYIN   {@link LongWritable} — byte offset of the line in the input split</li>
 *   <li>VALUEIN {@link Text}         — the input line itself</li>
 *   <li>KEYOUT  {@link Text}         — the extracted category</li>
 *   <li>VALUEOUT {@link IntWritable} — the constant count 1</li>
 * </ul>
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Output objects are reused across map() calls: Hadoop serializes them
    // immediately on context.write(), so per-record allocation is unnecessary.
    private static final IntWritable ONE = new IntWritable(1);
    private final Text keyOut = new Text();

    /**
     * Extracts the category field and emits it with a count of 1.
     *
     * @param key     byte offset of the line in the input split (unused)
     * @param value   one tab-separated input line
     * @param context MapReduce context used to emit the output pair
     * @throws IOException          if the framework fails to write the output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split the line into its tab-separated fields.
        String[] fields = value.toString().split("\t");

        // Skip malformed lines that lack a 7th field instead of failing the
        // whole task with an ArrayIndexOutOfBoundsException.
        if (fields.length <= 6) {
            return;
        }

        // Reuse the Text key rather than allocating a new one per record.
        keyOut.set(fields[6]);
        context.write(keyOut, ONE);
    }
}
