package com.yaoandw.crawler;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.Set;

public class MyCrawler {

	/** Seed/base URL: only links starting with this prefix are followed. */
	public static String baseUrl = "http://www.7kwx.com/modules/article/txtarticle.php?id=12";

	/** Raw properties loaded from crawl_config.properties. */
	public static Properties configuration = new Properties();
	// Effective settings; the literals below are the fallbacks used when the
	// config file is missing or a key is absent.
	public static String configCrawlMode = "text";
	public static String configDownload = "false";
	public static String configFileSuffix = "txt";

	/**
	 * Initializes the URL queue with the given seed URLs.
	 *
	 * @param seeds seed URLs to enqueue as unvisited
	 */
	private void initCrawlerWithSeeds(String[] seeds) {
		for (String seed : seeds) {
			LinkQueue.addUnvisitedUrl(seed);
		}
	}

	/**
	 * Loads crawl_config.properties from the classpath (relative to this class)
	 * and applies its values, keeping the declared defaults for any missing key.
	 * The stream is closed via try-with-resources (the original leaked it).
	 */
	private void initConfiguration() {
		try (InputStream is = this.getClass().getResourceAsStream("crawl_config.properties")) {
			if (is == null) {
				// Resource not on the classpath; keep the built-in defaults.
				System.err.println("crawl_config.properties not found; using default configuration");
				return;
			}
			configuration.load(is);
			// Two-arg getProperty keeps the current default instead of
			// overwriting it with null when the key is absent.
			configCrawlMode = configuration.getProperty("crawl_mode", configCrawlMode);
			configDownload = configuration.getProperty("download", configDownload);
			configFileSuffix = configuration.getProperty("file_suffix", configFileSuffix);
		} catch (IOException e) {
			System.err.println("Failed to load crawl_config.properties: " + e.getMessage());
			e.printStackTrace();
		}
		System.out.println(configuration.toString());
	}

	/**
	 * Runs the crawl loop: dequeues unvisited URLs, parses each page, optionally
	 * indexes its plain text and/or saves the raw content, then enqueues the
	 * extracted links that pass the baseUrl filter. Stops when the queue is
	 * empty or more than 1000 pages have been visited.
	 *
	 * @param seeds initial URLs to crawl from
	 */
	public void crawling(String[] seeds) {
		// Only follow links under baseUrl; null-safe (original NPE'd on null).
		LinkFilter filter = new LinkFilter() {
			public boolean accept(String url) {
				return url != null && url.startsWith(baseUrl);
			}
		};
		initConfiguration();
		// Initialize the URL queue with the seeds.
		initCrawlerWithSeeds(seeds);
		int pageCount = 1;
		// Loop while there are pending URLs and at most 1000 pages visited.
		while (!LinkQueue.unVisitedUrlsEmpty()
				&& LinkQueue.getVisitedUrlNum() <= 1000) {
			System.out.println(pageCount++);
			// Dequeue the next pending URL (queue stores Objects, hence the cast).
			String visitUrl = (String) LinkQueue.unVisitedUrlDeQueue();
			System.out.println("visitUrl:" + visitUrl);
			if (visitUrl == null) {
				continue;
			}
			// Mark as visited before processing so it is never re-enqueued.
			LinkQueue.addVisitedUrl(visitUrl);
			// Download and parse the page, extracting links and text.
			ExtracPageInfo obj = HtmlParserTool.extracLinksAndText(visitUrl, filter);
			if (obj == null) {
				// Fetch/parse failed; skip rather than NPE on the accessors.
				continue;
			}
			Set<String> links = obj.getLinks();
			String text = obj.getText();
			String contentType = obj.getContentType();
			String title = obj.getTitle();
			String metaDesc = obj.getMetaDescription();
			if ("text".equals(configCrawlMode)) {
				// Index the page's plain text for search.
				String plainText = PlainTextTool.getPlainText(text);
				PageIndexTool.createIndex(metaDesc, title, plainText, visitUrl, contentType);
			} else if ("file".equals(configCrawlMode)) {
				// TODO: file crawl mode not implemented yet.
			}
			// Optionally persist the raw page content to disk.
			if ("true".equals(configDownload)) {
				SaveFile.saveFile(visitUrl, contentType, text);
			}
			// Enqueue newly discovered, not-yet-visited URLs.
			if (links != null) {
				for (String link : links) {
					LinkQueue.addUnvisitedUrl(link);
				}
			}
		}
		// Flush and close the index once crawling is done.
		PageIndexTool.closeIndex();
	}

	/** Entry point: crawls starting from {@link #baseUrl}. */
	public static void main(String[] args) {
		MyCrawler crawler = new MyCrawler();
		crawler.crawling(new String[] { baseUrl });
	}
}
