package com.yaoandw.givebook.site;

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import org.apache.log4j.Logger;

import com.yaoandw.crawler.ExtracPageInfo;
import com.yaoandw.crawler.HtmlParserTool;
import com.yaoandw.crawler.PageIndexTool;
import com.yaoandw.crawler.PlainTextTool;
import com.yaoandw.crawler.UrlTool;
import com.yaoandw.givebook.BookCrawler;
import com.yaoandw.storage.FrontierFactory;
import com.yaoandw.storage.SimpleCrawlUrl;

/**
 * Base class for a per-site crawl task. A concrete subclass decides which links
 * are crawl targets (indexed) and which should be followed further.
 *
 * <p>Each instance is responsible for visiting exactly one URL (supplied at
 * construction); new links discovered on that page are pushed back onto the
 * shared frontier for other tasks to pick up.
 */
public abstract class AbstractSite implements ISite{
	private static Logger logger = Logger.getLogger(AbstractSite.class);

	/**
	 * Total number of pages crawled across ALL sites and threads.
	 * Guarded by the AbstractSite.class monitor (see countPage()).
	 */
	private static long crawledCnt = 0;

	/** Returns the total number of pages crawled so far (thread-safe read). */
	public static synchronized long getCrawledCnt(){
		return crawledCnt;
	}

	/** The single URL this task will visit. */
	private final SimpleCrawlUrl crawlUrl;

	public AbstractSite(SimpleCrawlUrl crawlUrl){
		this.crawlUrl = crawlUrl;
	}

	public void run(){
		crawling();
	}

	//========================
	/**
	 * Crawling procedure: marks the URL as visited, downloads the page,
	 * extracts its links and text, indexes target links, and enqueues the
	 * remaining links for further crawling (up to the configured depth).
	 */
	public void crawling() {
		try{
			SimpleCrawlUrl visitUrl = crawlUrl;
			if (visitUrl != null) {
				// Record the URL as visited before processing, so other
				// workers do not re-fetch it while we are still parsing.
				FrontierFactory.getVisitedFrontier().add(visitUrl.getOriUrl());
				countPage();
				// Download the page and extract the out-links plus textual content.
				ExtracPageInfo obj = HtmlParserTool.extracLinksAndText(visitUrl.getOriUrl(), null);
				Set<String> links = obj.getLinks();
				String text = obj.getText();
				String contentType = obj.getContentType();
				String title = obj.getTitle();
				String metaDesc = obj.getMetaDescription();
				String plainText = PlainTextTool.getPlainText(text);
				for (String link : links) {
					if(ifTheLinkIsTarget(link)){
						// Target link (e.g. a txt file): index it together with
						// the metadata of the page it was found on.
						PageIndexTool.createTxtFileIndex(metaDesc, title, plainText, visitUrl.getOriUrl(), contentType, link);
						logger.debug("found url : "+link);
					}else if(ifTheLinkContinueCrawl(link)){
						// Non-target link: strip the fragment and enqueue it
						// for further crawling.
						link = UrlTool.processUrlHash(link);
						addUnvisitedUrl(link, visitUrl);
					}
				}
				logger.debug(Thread.currentThread().getId()+",visitUrl:" + visitUrl.getOriUrl());
			}
		}catch(Exception e){
			logger.error("",e);
			// Flag interruption so the owning thread/pool can stop this task.
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Increments the global crawled-page counter.
	 *
	 * <p>Synchronizes on the class monitor (via {@code static synchronized}),
	 * NOT on {@code getSiteUrl()} as before: the counter is static and shared
	 * by every site, so a per-site String lock did not actually protect it —
	 * two different sites could increment concurrently and lose updates.
	 */
	private static synchronized void countPage(){
		crawledCnt ++;
	}

	/**
	 * Enqueues {@code url} on the shared frontier if the crawl-depth limit
	 * has not been reached.
	 *
	 * @param url      normalized link found on the current page; may be null
	 * @param visitUrl the page the link was found on; its layer determines depth
	 * @return the queued crawl URL, or {@code null} if the link was skipped
	 *         or the frontier rejected it
	 */
	private SimpleCrawlUrl addUnvisitedUrl(String url,SimpleCrawlUrl visitUrl){
		if(url != null && visitUrl.getLayer() < BookCrawler.configCrawlLayer){
			return FrontierFactory.getFrontier(null).putUrl(new SimpleCrawlUrl(url, visitUrl.getLayer()+1));
		}
		return null;
	}

	/** @return the base URL identifying this site. */
	protected abstract String getSiteUrl();

	/** @return true if {@code link} is a crawl target that should be indexed. */
	protected abstract boolean ifTheLinkIsTarget(String link);

	/** @return true if {@code link} should be enqueued for further crawling. */
	protected abstract boolean ifTheLinkContinueCrawl(String link);
}
