package edu.hit.crawler.mapred;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import edu.hit.crawler.io.CrawItem;

/**
 * Merges all {@link CrawItem} records that share the same URL key and
 * updates the crawl status of the merged record.
 *
 * <p>The first item seen for a key is copied as the base record; the
 * statuses of all items are then folded in, with {@code SUCCESS} taking
 * precedence over {@code RETRY}, which takes precedence over any other
 * status already on the base record.
 *
 * @author zzc (zzc3615@gmail.com)
 */
public class ParseReducer extends Reducer<Text, CrawItem, Text, CrawItem> {

	/**
	 * Reduces all {@link CrawItem} values for one URL key into a single
	 * merged item and writes it out.
	 *
	 * @param key     the URL shared by all values
	 * @param values  all crawl items observed for this URL
	 * @param context Hadoop context used to emit the merged record
	 * @throws IOException          if the underlying write fails
	 * @throws InterruptedException if the task is interrupted
	 */
	@Override
	public void reduce(Text key, Iterable<CrawItem> values, Context context)
			throws IOException, InterruptedException {

		boolean isSuccess = false; // at least one item was fetched successfully
		boolean isRetry = false;   // at least one item is marked for retry
		CrawItem value = null;

		int n = 0;
		for (CrawItem it : values) {
			if (n == 0) {
				// Copy the first item as the base record for this URL.
				value = new CrawItem(it);
			}
			// BUG FIX: 'n' was never incremented, so the copy above ran on
			// every iteration and the base record silently became the LAST
			// item instead of the first.
			n++;
			if (it.getStatus() == CrawItem.SUCCESS) {
				isSuccess = true;
			} else if (it.getStatus() == CrawItem.RETRY) {
				isRetry = true;
			}
		}

		// Hadoop invokes reduce() with at least one value per key, but guard
		// against an empty iterable rather than risk an NPE on write.
		if (value == null) {
			return;
		}

		// Set the merged crawl status: SUCCESS wins over RETRY; otherwise the
		// status copied from the base record is kept unchanged.
		if (isSuccess) {
			value.setStatus(CrawItem.SUCCESS);
		} else if (isRetry) {
			value.setStatus(CrawItem.RETRY);
		}
		context.write(key, value);
	}
}
