package com.chb.pagerank;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 *	
 */
/**
 * Reducer for one iteration of PageRank.
 *
 * <p>For each page (key) it receives a mix of two record kinds emitted by the
 * mapper: one "structure" record that carries the page's adjacency list plus
 * its old PR value, and zero or more "contribution" records carrying PR shares
 * passed in from linking pages. It sums the contributions, applies the PageRank
 * formula, accumulates the absolute PR delta into a global counter (so the
 * driver can detect convergence), and emits the node with its new PR value as
 * input for the next map round.
 */
public class MyPageRankReducer extends Reducer<Text, Text, Text, Text>{

	/** Damping factor q of the PageRank formula. */
	private static final double DAMPING = 0.85;
	/** Total number of pages N in the graph (hard-coded for this small example). */
	private static final int TOTAL_PAGES = 4;
	/** Counters are long-valued, so the PR delta is scaled to keep 3 decimal places. */
	private static final double COUNTER_SCALE = 1000.0;

	/**
	 * Combines the PR contributions for {@code key} into a new PR value and
	 * writes the updated node.
	 *
	 * @param key     the page identifier
	 * @param values  serialized {@code Node} records: exactly one with an
	 *                adjacency list (old PR), the rest plain contributions
	 * @param context used to update the convergence counter and emit output
	 * @throws IOException          on write failure
	 * @throws InterruptedException if the task is interrupted
	 */
	@Override
	protected void reduce(Text key, Iterable<Text> values, Context context)
			throws IOException, InterruptedException {
		double prSum = 0.0;
		Node sourceNode = null;
		for (Text text : values) {
			Node node = Node.fromMR(text.toString());
			if (node.containsAdjacentNodes()) {
				// The record carrying the adjacency list holds the page's old PR value.
				sourceNode = node;
			} else {
				// Contribution record: accumulate the PR share from a linking page.
				prSum += node.getPageRank();
			}
		}

		if (sourceNode == null) {
			// Only contribution records arrived for this key — there is no node
			// structure to carry forward, so emitting would NPE. Skip it.
			return;
		}

		// New PR value: PR = (1 - q) / N + q * sum(contributions)
		double newPR = (1 - DAMPING) / TOTAL_PAGES + DAMPING * prSum;

		// Fold |newPR - oldPR| into a global counter; the driver reads it after
		// the job to decide whether the ranks have converged. increment() takes
		// a long, so scale first to preserve fractional precision.
		double delta = newPR - sourceNode.getPageRank();
		long scaledDelta = Math.abs((long) (delta * COUNTER_SCALE));
		context.getCounter(MyRunJob.MyCounter.countName).increment(scaledDelta);

		// Emit the node with its updated PR; this output feeds the next map round.
		sourceNode.setPageRank(newPR);
		context.write(key, new Text(sourceNode.toString()));
	}
}
