/**
 *
 * Copyright Pact Lab of H.I.T.
 *
 * Designed and Implemented by Grid Researching Group, 
 * Pact Lab, Harbin
 * 
 * This Project is part of the national 973 Project:
 * Internet Based Virtual Computing Environment
 *
 * http://pact518.hit.edu.cn
 * 
 * Author:       Meteor <meteorlxk@gmail.com> 
 * Copyright:    pact518 
 * Version:      1.0
 * Created:      2009-5-4 
 * LastModified: 2009-5-4
 */
package edu.hit.pact.pgse.crawler.web;

import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import edu.hit.pact.pgse.bean.TaskPieceProperty;
import edu.hit.pact.pgse.crawler.finish.FinishChecker;
import edu.hit.pact.pgse.crawler.util.BufferedQueue;
import edu.hit.pact.pgse.crawler.util.DuplicateUrlFilter;
import edu.hit.pact.pgse.crawler.util.FileFilter;
import edu.hit.pact.pgse.crawler.util.RobotFileManager;
import edu.hit.pact.pgse.crawler.util.RobotFileScanner;
import edu.hit.pact.pgse.util.AbstractThread;
import edu.hit.pact.pgse.util.Globals;

/**
 * Screens raw outbound links extracted from a fetched page and feeds the
 * acceptable ones into the task piece's clean-URL queue for crawling.
 *
 * <p>A link survives only if it matches the piece's document-URL inclusion
 * regex, has not been seen before (duplicate filter), uses the HTTP protocol,
 * and its source page is still within the configured crawl depth.</p>
 *
 * @author meteorlxk
 */
public class PagePrepareProcessor {

	private Log logger;

	private TaskPieceProperty taskPieceProperty;

	/** Orders pending URLs by back-link count (priority queue). */
	private PriorityBlockingQueue<UrlUnit> cleanUrlQueue;
	/** Tracks every known UrlUnit by URL so back-link counts can be updated. */
	private ConcurrentHashMap<String, UrlUnit> urlUnitMap;

	private FileFilter fileFilter;
	private RobotFileManager robot;
	private DuplicateUrlFilter duplicateUrlFilter;
	/** Compiled once from the piece's document-URL inclusion regex. */
	private Pattern includePattern;

//	private Timer queueInfoRecorder;
//	private DSInfoOutput dsoutput;

	/**
	 * @param pieceProperty      task piece whose queue and URL map this
	 *                           processor feeds
	 * @param fileFilter         file-type filter (currently unused — the check
	 *                           in filterLinks is commented out)
	 * @param robot              robots.txt manager (currently unused as well)
	 * @param duplicateUrlFilter global already-seen-URL filter
	 */
	public PagePrepareProcessor(TaskPieceProperty pieceProperty, FileFilter fileFilter, 
			RobotFileManager robot, DuplicateUrlFilter duplicateUrlFilter) {

		this.logger = LogFactory.getLog(PagePrepareProcessor.class);
		this.taskPieceProperty = pieceProperty;
		this.cleanUrlQueue = pieceProperty.getCleanUrlQueue();
		this.urlUnitMap = pieceProperty.getUrlUnitMap();

		this.fileFilter = fileFilter;
		this.robot = robot;
		this.duplicateUrlFilter = duplicateUrlFilter;

		this.includePattern = Pattern.compile(((WebTaskPiece) taskPieceProperty.getPiece()).getDocURLRegexInclude());
//		this.queueInfoRecorder = new Timer();
//		dsoutput = new DSInfoOutput();
//		queueInfoRecorder.schedule(dsoutput, 3 * 1000, 3 * 1000);
		// queueInfoRecorder.schedule(new UrlRecorder(), 10*1000, 120*1000);
	}

	/**
	 * Filters the links found on the page represented by
	 * {@code currentUrlUnit} and enqueues those that should be crawled next.
	 *
	 * @param interLinks     raw link strings extracted from the current page
	 * @param currentUrlUnit the page the links came from; its depth decides
	 *                       whether child links may still be enqueued
	 */
	public void filterLinks(List<String> interLinks, UrlUnit currentUrlUnit) {
		// If the piece was cancelled or has finished, drop all pending work.
		if (FinishChecker.isFinish(logger, taskPieceProperty)) {
			logger.info("since this piece is canceled or finished, clear the to do url queue");
			this.cleanUrlQueue.clear();
			return;
		}

		/*
		 * Children of a page already at maxDepth would exceed the configured
		 * crawl depth, so stop expanding here.
		 */
		WebTaskPiece webTaskPiece = (WebTaskPiece) taskPieceProperty.getPiece();
		if (currentUrlUnit.getCurrDepth() >= webTaskPiece.getMaxDepth()) {
			logger.debug("the url's depth is larger than "
					+ webTaskPiece.getMaxDepth()
					+ ", so drop it.");
			return;
		}

		for (String interLink : interLinks) {
			logger.debug(interLink); // was System.out.println — route through the logger

			// Count the back-link even when the URL itself is filtered out
			// below: the priority queue orders pending URLs by back-links.
			// Single get() instead of containsKey()+get() — atomic on a
			// ConcurrentHashMap, so no NPE window under concurrent removal.
			UrlUnit known = urlUnitMap.get(interLink);
			if (known != null) {
				known.incrementBackLinks();
			}

			// Only URLs matching the piece's inclusion regex are crawled.
			if (!includePattern.matcher(interLink).find()) {
				logger.info("includeMatcher.find() == false");
				continue;
			}

			/*
			 * filter duplicate url: most urls are duplicate, so the duplicate
			 * check should execute first!
			 */
			if (duplicateUrlFilter.contains(interLink)) {
				logger.info("drop duplicate url :" + interLink);
				continue;
			}

			/*
			 * Only the HTTP protocol is supported. A malformed URL used to be
			 * merely printed, leaving protocol == null and crashing the loop
			 * with a NullPointerException on the next line; now it is skipped.
			 */
			String protocol;
			try {
				protocol = new URL(interLink).getProtocol();
			} catch (MalformedURLException e) {
				logger.info("drop malformed url :" + interLink, e);
				continue;
			}
			if (!protocol.equalsIgnoreCase("HTTP")) {
				logger.info(interLink + "'s protocol is not HTTP");
				continue;
			}

			/*
			 * if supported.file.type.list contains the prefix of the todoUrl , it
			 * return true if unsupported.file.type.list contains the prefix of the
			 * todoUrl , it return false or else return true
			 */
//			if (!this.fileFilter.iCanProcess(interLink)) {
//				logger.info(interLink + "'s file type has not been support");
//				continue;
//			}

			/*
			 * filter robots file
			 */
			UrlUnit urlUnit = new UrlUnit(interLink, currentUrlUnit.getCurrDepth() + 1);
//			if (this.robot.disallow(urlUnit)) {
//				logger.info("isRobots(" + interLink + ")");
//				continue;
//			}

			// putIfAbsent is atomic, unlike the former containsKey-then-put
			// pair. Enqueue the instance actually tracked in the map so that
			// later incrementBackLinks() calls affect the queued unit's
			// priority.
			UrlUnit tracked = urlUnitMap.putIfAbsent(interLink, urlUnit);
			if (tracked == null) {
				tracked = urlUnit;
			}

			logger.info("cleanUrlQueue.add");
			this.cleanUrlQueue.add(tracked);
		}

	}
}
