package crawler.web.framework;

import javax.annotation.Resource;

import library.Tracer;
import library.database.mongo.MongoDB;
import library.functions.Function0PV;
import library.functions.Function2PR;
import library.thread.flow.NodeMessage;
import library.thread.flow.ParameterBox;
import library.thread.flow.WorkflowContainer;
import library.thread.flow.WorkflowNode;

import org.apache.commons.lang.StringUtils;
import org.htmlparser.Parser;
import org.htmlparser.tags.Html;
import org.htmlparser.util.ParserException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;

import crawler.web.beans.CrawlPageContext;
import crawler.web.beans.CrawlURL;
import crawler.web.beans.ProxySetting;
import crawler.web.component.HtmlDigger;
import crawler.web.engine.BDBFrontier;
import crawler.web.engine.config.SpBN;
import crawler.web.engine.config.Suite;
import crawler.web.engine.custom.ProxyDistributor;
import crawler.web.engine.custom.TwoDBFrontier;
import crawler.web.utils.MyHttpClient;

/**
 * A single crawler worker: repeatedly pops a {@link CrawlURL} from the URL
 * frontier, downloads its HTML through an (optionally proxied) HTTP client,
 * harvests new URLs back into the frontier, hands the page to the
 * {@link Dispatcher} for component-specific processing and finally persists
 * the crawl context to MongoDB.
 *
 * <p>Declared as a Spring prototype bean: each instance runs as one
 * independent sub-node registered in the {@code CrawlerContainer} workflow.
 *
 * @author waynechen Feb 8, 2012 11:18:01 PM
 */
@Component(value = "Crawler")
@Scope(value = "prototype")
public class Crawler extends WorkflowNode {

	@Autowired(required = true)
	public Crawler(//
	@Value("#{T(crawler.web.framework.CrawlerContainer).getNO()}") String name, //
			@Qualifier("CrawlerContainer") WorkflowContainer<Crawler> container//
	) {
		super("cc " + name, container);
	}

	/** Hands out / reclaims proxy settings shared among crawler instances; may be absent. */
	@Resource(name = "ProxyDistributor")
	private ProxyDistributor proxyDistributor;

	/**
	 * HTTP client — lazily created in {@link #initHttpClient()}, possibly
	 * configured with a proxy.
	 */
	private MyHttpClient client;
	/**
	 * HTML parser, reused across iterations of the crawl loop.
	 */
	private Parser parser = new Parser();
	/**
	 * Back-off interval (ms) applied when a URL is rejected for being
	 * crawled too frequently.
	 */
	private static final int FREQUENCY_INTERVAL = 1000;

	/** Decides whether a freshly discovered URL may enter the frontier. */
	@Resource(name = "URLFilter")
	private Function2PR<Crawler, CrawlURL, Boolean> urlFilter;

	/** Pauses the thread while outside the permitted crawl time window. */
	@Resource(name = "HumanActionSimulator4VisitingWeb")
	private Function0PV humanSimulator;

	@Resource(name = SpBN.bean_MongoDB)
	private MongoDB mongo;

	/** Queue of URLs still waiting to be crawled. */
	@Resource(name = SpBN.bean_url_frontier)
	private TwoDBFrontier urlFrontier;

	/** Dead-letter store for URLs that exhausted their retries. */
	@Resource(name = SpBN.bean_failure_urls)
	private BDBFrontier failureURLs;

	/** Maximum download attempts per URL before it moves to {@link #failureURLs}. */
	@Resource(name = SpBN.conf_web_max_retry_times)
	private Integer maxRetryTimes;

	/** Rejects URLs that were fetched too recently (anti-hammering). */
	@Resource(name = "CrawlFrequencyFilter")
	private Function2PR<Crawler, CrawlURL, Boolean> crawlIntervalFilter;

	/** MongoDB collection the crawl results are saved into. */
	@Resource(name = SpBN.conf_htmlTabName)
	private String tableName;

	/** Routes a downloaded page to whichever component can handle it. */
	@Resource(name = "Dispatcher")
	private Dispatcher dispatcher;

	/** Consecutive-failure count after which the whole crawl is stopped. */
	@Resource(name = SpBN.conf_failure_stop_count)
	private Integer fsc;

	/**
	 * Main crawl loop. Runs until the node is stopped; after more than
	 * {@link #fsc} consecutive download failures it asks the container to
	 * stop all crawler sub-nodes.
	 */
	@Override
	protected void nodeExecute(ParameterBox args) {
		Tracer.info("线程: " + name + " 开始");
		// lazily create the HTTP client (with a proxy if one is available)
		initHttpClient();

		// parameters shared with the dispatcher's second-stage crawling
		ParameterBox params = new ParameterBox();
		params.push("parser", parser);
		params.push("caller", this);
		params.push("client", client);

		int failureCount = 0;
		try {
			while (true) {
				if (isStopped())
					break;

				// drain any pending container messages
				handleMessage();

				// more than fsc consecutive failures: stop all sub-nodes
				if (failureCount > fsc)
					getContainer().sendMessage2me(this, WorkflowContainer.MSG_STOP_ALL_SUBNODES, null);

				// block while outside the permitted crawl time window
				humanSimulator.apply();

				Tracer.debug("从库里取url");
				// take the next URL to crawl from the frontier
				CrawlURL url = urlFrontier.pop();
				if (url == null) {
					Tracer.info("库中没有url,等待中...");
					sleepWithoutThrowing(2000);
					continue;
				}

				// anti-hammering: skip URLs fetched too recently
				if (!crawlIntervalFilter.apply(this, url)) {
					Tracer.trace("frequency", "防止频繁抓取" + url.getOriUrl());
					if (url.getRetryTimes() < maxRetryTimes)
						urlFrontier.putBack(url);
					sleepWithoutThrowing(FREQUENCY_INTERVAL);
					continue;
				}

				// connect to the site and fetch the HTML
				try {
					String html = client.getHtml(url);
					if (StringUtils.isEmpty(html))
						throw new Exception("下载的html代码为空");
					else if (html.equals("404"))
						continue; // page is gone — drop the URL silently

					Tracer.debug("获得html");
					//TODO download the iframes referenced by the html and splice them into the
					// right places; design a HtmlDigger.replaceIframe() method for this
					url.setHtml(html);
					Tracer.trace("html", html);
					Tracer.trace("html", url.getOriUrl());
					parser.setInputHTML(html);
					parser.setEncoding(url.getEncoding());
				} catch (Exception e) {
					failureCount++;
					Tracer.info(">>>>>>>>>>>>>>>下载网页失败-----------------" + e.getMessage());
					Tracer.info(url.getOriUrl() + ", 已重试次数: " + url.getRetryTimes());
					// record the failed attempt
					url.setRetryTimes(url.getRetryTimes() + 1);
					// below the retry limit: back into the frontier;
					// otherwise move the URL to the failure queue
					if (url.getRetryTimes() < maxRetryTimes)
						urlFrontier.putBack(url);
					else
						failureURLs.put(url);
					continue;
				}

				CrawlPageContext context = CrawlPageContext.createInitInstance();
				context.setUrl(url);

				// harvest new URLs from the page into the frontier
				digURL(urlFrontier, urlFilter, context);

				// dispatch the page: whichever component can handle this URL
				// processes it and collects the extra crawl results.
				// NOTE: the parser may be mutated during dispatch.
				params.push("pageContext", context);
				dispatcher.dispatch(params);
				// Boolean.TRUE.equals guards against the dispatcher never
				// pushing "isSuccess" — the old raw cast would NPE on unboxing
				if (!Boolean.TRUE.equals(params.peek("isSuccess"))) {
					url.setHtml("");// discard the freshly downloaded html
					urlFrontier.putBack(url);
					continue;
				}

				// persist to MongoDB
				context.getUrlList().clear();// the harvested urls are not stored
				context.setParseStatus(Suite.ParseStatus.DOWNLOAD.getValue());// mark as downloaded
				mongo.save(tableName, context);
				failureCount = 0;
			}
		} catch (Exception e) {
			Tracer.exception("线程: " + name + " 发生了异常");
			Tracer.exception(e);
		} finally {
			Tracer.info("线程: " + name + " 结束");
			releaseProxy();
			// unregister this crawler so the container can reclaim its slot
			getContainer().sendMessage2me(this, CrawlerContainer.MSG_REMOVE_SUBNODE, this);
		}
	}

	/**
	 * Extracts the URLs contained in the page currently loaded into
	 * {@link #parser}, runs them through {@code urlFilter} and puts the
	 * accepted ones into the frontier.
	 *
	 * @param urlFrontier frontier the accepted URLs are added to
	 * @param urlFilter   decides which harvested URLs are kept
	 * @param context     crawl context of the page being processed
	 * @return {@code true} if at least one URL was added to the frontier
	 * @throws Exception propagated from parsing or frontier access
	 */
	private boolean digURL(TwoDBFrontier urlFrontier, Function2PR<Crawler, CrawlURL, Boolean> urlFilter, CrawlPageContext context) throws Exception {
		boolean isAdd = false;
		Html html = HtmlDigger.findHtmlNode(parser);
		if (html != null) {
			HtmlDigger.digURL(html, context);
			int count = 0;
			// put the newly discovered urls into the frontier
			for (CrawlURL u : context.getUrlList()) {
				u.setLayer(context.getUrl().getLayer() + 1);
				if (urlFilter.apply(this, u)) {
					if (urlFrontier.put(u)) {
						// FIX: any successful put counts; the old code
						// (isAdd = put(u)) let a later failed put mask
						// earlier successes
						isAdd = true;
						Tracer.trace("url", "添加url: " + u.getOriUrl());
						count++;
					}
				}
			}
			Tracer.info("新增url数为: " + count);
		}
		return isAdd;
	}

	/**
	 * Returns this crawler's proxy to the distributor (when one is
	 * configured) so another instance can reuse it.
	 *
	 * @author waynechen
	 */
	private void releaseProxy() {
		if (proxyDistributor != null) {
			proxyDistributor.releaseProxy(this);
		}
	}

	/**
	 * Lazily creates the HTTP client; when a proxy distributor is available
	 * the client is configured with the proxy it hands out.
	 *
	 * @author waynechen
	 */
	private void initHttpClient() {
		if (client != null)
			return;
		if (proxyDistributor == null)
			client = new MyHttpClient();
		else {
			ProxySetting p = proxyDistributor.getProxy(this);
			client = new MyHttpClient(p.getIp(), p.getProt());
		}
	}

	/**
	 * Drains this node's message queue; messages are currently only logged.
	 */
	@Override
	protected void handleMessage() {
		NodeMessage msg;
		while ((msg = popMessage()) != null)
			Tracer.debug(this.name + ":hey! i get a message!" + msg.getParameter());
	}
}

//
//
//
//
//
//
//
//
//
//
