package cn.lsh.reducer;

import cn.lsh.mapper.HotCommentMapper;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Second-stage reducer: passes the global document counter through unchanged,
 * counts zero-valued markers as "number of documents containing the word",
 * and normalizes raw per-document term counts by the document's total token count.
 */
public class HotComment2Reducer extends Reducer<Text, IntWritable, Text, DoubleWritable> {
	// Reused output value to avoid allocating a writable per emitted record.
	private final DoubleWritable outValue = new DoubleWritable();

	@Override
	protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
		// Sample input — three kinds of records arrive here:
		// group 1 (per-document data):
		//   10247087        48   <- total token count of document 10247087
		//   能_10247087      1   <- occurrences of that word in the document
		//   相逢_10247087    1
		// group 2 (zero-valued markers, one per document containing the word):
		//   能  0
		//   能  0
		// group 3 (global counter emitted by the mapper):
		//   count   4340

		if (HotCommentMapper.counter.equals(key)) {
			// Global counter key: forward its first value as-is.
			outValue.set(values.iterator().next().get());
			context.write(key, outValue);
			return;
		}

		// NOTE(review): this relies on the first non-zero value being the
		// document's total token count — i.e. on the iteration order of
		// `values`, which Hadoop does not guarantee without a secondary
		// sort. Confirm the job configuration enforces this ordering.
		boolean totalSeen = false;        // flips once the document total has been read
		int docTokenTotal = 0;            // total number of tokens in the document
		double docsWithWord = 0;          // count of documents in which the word appears

		for (IntWritable value : values) {
			int v = value.get();
			if (v == 0) {
				// Zero marker: the word occurred in one more document.
				docsWithWord += 1;
			} else if (!totalSeen) {
				// First non-zero value: the document's total token count.
				docTokenTotal = v;
				totalSeen = true;
			} else {
				// Remaining non-zero values are raw term counts — normalize them.
				// Cast to double so the division keeps its fractional part.
				outValue.set((double) v / docTokenTotal);
				context.write(key, outValue);
			}
		}

		if (docsWithWord > 0) {
			outValue.set(docsWithWord);
			context.write(key, outValue);
		}
	}
}
