package com.sheep.hadoop.mapreduce;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Map stage of the word-count job.
 *
 * <p>Type parameters (KEYIN, VALUEIN, KEYOUT, VALUEOUT):
 * <ul>
 *   <li>KEYIN   {@link LongWritable} — byte offset of the line within the input split (k1)</li>
 *   <li>VALUEIN {@link Text} — the text of one input line (v1)</li>
 *   <li>KEYOUT  {@link Text} — a single word (k2)</li>
 *   <li>VALUEOUT {@link LongWritable} — the count 1 for each occurrence (v2)</li>
 * </ul>
 *
 * @author wangze
 * @date 2021-02-28 15:40.
 **/
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

	/**
	 * Reused output key. Hadoop serializes the pair inside {@code context.write},
	 * so one mutable instance can be reused across records instead of allocating
	 * a new Text for every word.
	 */
	private final Text word = new Text();

	/** The constant count 1 emitted for every word occurrence; never mutated. */
	private static final LongWritable ONE = new LongWritable(1);

	/**
	 * Converts one input line (k1, v1) into a (word, 1) pair (k2, v2) for each
	 * comma-separated token on the line.
	 *
	 * <p>Example:
	 * <pre>
	 * k1  v1
	 * 0   hello,world,hadoop
	 * 15  hdfs,hive,hello
	 *
	 * k2     v2
	 * hello  1
	 * world  1
	 * hadoop 1
	 * ...
	 * </pre>
	 *
	 * @param key     k1 — byte offset of this line in the split
	 * @param value   v1 — the line's text
	 * @param context MapReduce context used to emit (k2, v2) pairs
	 * @throws IOException          if writing to the context fails
	 * @throws InterruptedException if the task is interrupted while writing
	 */
	@Override
	protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
		// NOTE: split(",") keeps empty tokens between consecutive commas,
		// matching the original behavior (e.g. "a,,b" emits an empty word).
		for (String token : value.toString().split(",")) {
			word.set(token);
			context.write(word, ONE);
		}
	}
}
