/*
 * Copyright (c) 2010 CCX(China) Co.,Ltd. All Rights Reserved.
 *
 * This software is the confidential and proprietary information of
 * CCX(China) Co.,Ltd. ("Confidential Information").
 * It may not be copied or reproduced in any manner without the express 
 * written permission of CCX(China) Co.,Ltd.
 * 
 *	Created on 2010-12-27 下午04:35:05
 */
package cn.com.ccxe.core.crawl.article;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import cn.com.ccxe.core.crawl.Fetcher;
import cn.com.ccxe.core.db.WebDB;
import cn.com.ccxe.core.db.WebWorkQueues;
import cn.com.ccxe.core.entity.Article;
import cn.com.ccxe.core.entity.ArticleLink;
import cn.com.ccxe.core.entity.WebData;
import cn.com.ccxe.core.index.Indexer;
import cn.com.ccxe.core.io.MirrorWrite;
import cn.com.ccxe.core.util.Configurations;

/**
 * 
 *
 * @author hetao
 */
/**
 * Worker that drains {@link ArticleLink} tasks from the shared {@link WebWorkQueues},
 * fetches and extracts each article, indexes it, and mirrors the raw HTML to disk.
 * Intended to be run on a single dedicated thread; call {@link #shutDown()} to stop
 * the loop cooperatively.
 *
 * Thread-safety note: the task counters are written only by the crawler's own thread;
 * they are {@code volatile} so monitoring threads see up-to-date values.
 *
 * @author hetao
 */
public class ArticleCrawler implements Runnable {

	final static Logger 		logger = LoggerFactory.getLogger(ArticleCrawler.class);
	
	/** Retry counter for the link currently being processed (crawler thread only). */
	private int 				refetchCount = 0;
	/** Maximum number of additional fetch attempts after an I/O failure. */
	private static final int 	REFETCH_AMOUNT = Configurations.getInt("refetch.amount", 2);
	/** Number of tasks processed successfully. */
	private volatile int 		successTaskNum = 0;
	/** Number of tasks that failed. */
	private volatile int 		failedTaskNum = 0;
	/** Number of tasks that were retried. */
	private volatile int 		restartTaskNum = 0;
	/** Human-readable state for monitoring: NEW, RUN or SLEEP. */
	private String 				state;
	/** Display name of this crawler instance. */
	private String 				name;
	/** Cooperative stop flag; cleared by {@link #shutDown()}. */
	private AtomicBoolean		crawlerSwitch = new AtomicBoolean(true);
	
	/**
	 * Creates a crawler in the NEW state.
	 *
	 * @param name display name used for monitoring.
	 */
	public ArticleCrawler(String name) {
		this.name = name;
		this.state = "NEW";
	}

	/**
	 * Main loop: consume links from the work queue, process each one, and record
	 * the result in the web database. Sleeps 10 seconds when the queue is empty.
	 * Exits when {@link #shutDown()} is called or the thread is interrupted.
	 */
	public void run() {
		final WebWorkQueues workQueue = WebWorkQueues.getInstance();
		final WebDB webDB = WebDB.getInstance();
		while (crawlerSwitch.get()) {
			this.state = "RUN";
			try {
				if (workQueue.size() > 0) {
					ArticleLink link = workQueue.consume();
					if (link != null) {
						process(link);
						webDB.update(link.getPath(), link);
						successTaskNum++;
						logger.debug("采集成功！{}", link);
					}
				} else {
					// Report SLEEP *before* sleeping so monitors see the true state.
					this.state = "SLEEP";
					Thread.sleep(10 * 1000);
				}
			} catch (InterruptedException e) {
				// Restore the interrupt flag and stop promptly; interruption is a
				// shutdown request, not a task failure.
				Thread.currentThread().interrupt();
				break;
			} catch (Exception e) {
				failedTaskNum++;
				logger.error("ArticleCrawler [{}] task failed", name, e);
			}
		}
		
	}

	/**
	 * Fetches the page behind {@code link}, extracts an {@link Article}, indexes it,
	 * and mirrors the raw HTML. On {@link IOException} the fetch is retried, but at
	 * most {@code REFETCH_AMOUNT} additional times per link.
	 *
	 * @param link
	 *            the article link to crawl.
	 */
	public void process(ArticleLink link) {

		Fetcher fetcher = new Fetcher();
		refetchCount = 0;
		// Bounded retry loop: the original recursive retry never incremented
		// refetchCount and could recurse without bound on a persistent failure.
		while (true) {
			try {
				WebData data = fetcher.fetch(link);
				if (data == null 
						|| data.getHtml() == null 
						|| data.getHtml().length() <= 0) {
					logger.debug("采集{}时，没有发现内容！", link.getPath());
					return;
				}
				ArticleExtractor ae = new ArticleExtractor(data);
				Article article = ae.extractor(link);
				// Prefer the title carried on the link over the extracted one.
				if(!StringUtils.isEmpty(link.getTitle())) {
					article.setTitle(link.getTitle());
				}
				article.setModuleId(link.getModuleId());
				logger.debug("解析出来的文章：{}",article);
				Indexer.put(article);
				MirrorWrite write = new MirrorWrite();
				write.writeFile(data.getHtml(), data.getLink().getPath());
				return;
			} catch (IOException e) {
				failedTaskNum++;
				logger.error("Fetch failed for {} (attempt {} of {})",
						link.getPath(), refetchCount + 1, REFETCH_AMOUNT + 1, e);
				if (refetchCount >= REFETCH_AMOUNT) {
					// Give up on this link after exhausting the retry budget.
					refetchCount = 0;
					return;
				}
				refetchCount++;
				restartTaskNum++;
			}
		}
	}


	/** @return the number of tasks that were retried. */
	public int getRestartTaskNum() {
		return restartTaskNum;
	}

	/** @return the number of tasks that failed. */
	public int getFailedTaskNum() {
		return failedTaskNum;
	}

	/** @return the number of tasks processed successfully. */
	public int getSuccessTaskNum() {
		return successTaskNum;
	}

	/**
	 * Logs the current task counters to the supplied logger.
	 *
	 * @param log destination logger.
	 */
	public void getCrawlerInfo(Logger log) {
		log.info("处理成功的任务数:{}", successTaskNum);
		log.info("处理失败的任务数:{}", failedTaskNum);
		log.info("重启的任务数:{}", restartTaskNum);
	}

	/** @return the current state string (NEW, RUN or SLEEP). */
	public String state() {
		return state;
	}

	/** @return this crawler's display name. */
	public String name() {
		return name;
	}
	
	/** Requests the run loop to stop after the current iteration. */
	public void shutDown() {
		crawlerSwitch.set(false);
	}
	
	/** Ad-hoc manual test: crawl a single hard-coded URL. */
	public static void main(String[] args) {
		String url = "http://money.163.com/11/0113/06/6Q8PGFTF00253B0H.html";
		ArticleLink link = new ArticleLink(url);
		link.setModuleId(21);
		ArticleCrawler crawler = new ArticleCrawler("crawl");
		crawler.process(link);
	}
	
}
