package org.apache.lucene.demo;

import java.io.File;
import java.io.IOException;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import com.cl.util.PropertiesUtil;

/**
 * Crawler entry point: starting from a set of seed URLs, downloads up to a
 * fixed number of pages (restricted to http://news.163.com/), then builds a
 * Lucene index over the downloaded documents.
 * 
 * @author 390576
 * 
 */
public class Crawler {
	/**
	 * Seed the queue of not-yet-visited URLs.
	 *
	 * @param seeds initial URLs to enqueue
	 */
	private void initCrawlerWithSeeds(String[] seeds) {
		for (int i = 0; i < seeds.length; i++) {
			LinkDB.addUnVisitedUrl(seeds[i]);
		}
	}

	/**
	 * Crawl pages reachable from the given seeds, download each page to disk,
	 * then index the downloaded files under the configured {@code docs.path}.
	 *
	 * @param seeds initial URLs to start crawling from
	 */
	public void crawling(String[] seeds) {
		// Only follow links that stay inside the news.163.com site.
		LinkFilter filter = new LinkFilter() {
			@Override
			public boolean accept(String url) {
				return url.startsWith("http://news.163.com/");
			}
		};
		// Initialize the unvisited-URL queue with the seeds.
		initCrawlerWithSeeds(seeds);
		// Loop while the queue is non-empty and fewer than 10 pages crawled.
		while (!LinkDB.unVisitedUrlsEmpty()
				&& LinkDB.getVisitedUrlNum() < 10) {
			// Dequeue the next URL first; indexing the queue with a separate
			// counter (as before) overruns the shrinking queue and throws
			// IndexOutOfBoundsException.
			String visitUrl = LinkDB.unVisitedUrlDeQueue();
			if (visitUrl == null) {
				continue;
			}
			System.out.println("待抓取的Url=" + visitUrl);
			System.out.println("已经抓取的Url=" + LinkDB.getVisitedUrlNum());
			// Download the page to the local docs directory.
			FileDownLoader downLoader = new FileDownLoader();
			downLoader.downloadFile(visitUrl);
			// Record the URL as visited.
			LinkDB.addVisitedUrl(visitUrl);
			// Extract links from the downloaded page and enqueue new ones.
			Set<String> links = HtmlParserTool.extracLinks(visitUrl, filter);
			for (String link : links) {
				LinkDB.addUnVisitedUrl(link);
			}
		}

		// Directory holding the downloaded documents to index.
		// Note: new File(...) never returns null, so only check the
		// file system state; bail out early — indexing a missing or
		// unreadable directory cannot succeed.
		File docFile = new File(PropertiesUtil.getProperties("docs.path"));
		if (!docFile.exists() || !docFile.canRead()) {
			System.out.println("docs.path 为空,或者不存在或者不可读");
			return;
		}
		// Build the index over the downloaded files.
		try {
			Directory directory = FSDirectory.open(new File(PropertiesUtil.getProperties("index.path")));
			Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
			IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
			IndexWriter writer = new IndexWriter(directory, iwc);
			try {
				loopDirectory(writer, docFile);
			} finally {
				// Close the writer even if indexing throws, so the index
				// directory is not left locked.
				writer.close();
			}
			System.out.println("-------------------生成索引完毕-------------------");
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Recursively walk {@code file}; for each regular file, add a Lucene
	 * document carrying its path, parsed news title, and last-modified time.
	 *
	 * @param writer open index writer that receives the documents
	 * @param file   file or directory to index
	 */
	private static void loopDirectory(IndexWriter writer, File file) {
		if (file != null && file.canRead()) {
			if (file.isDirectory()) {
				String[] fileNames = file.list();
				// list() may return null on an I/O error even for a
				// readable directory — guard against NPE.
				if (fileNames != null) {
					for (int i = 0; i < fileNames.length; i++) {
						loopDirectory(writer, new File(file, fileNames[i]));
					}
				}
			} else {
				String title = HtmlParserTool.getNewsTitle(file.getAbsolutePath());
				System.out.println("title=" + title);

				Document doc = new Document();
				doc.add(new StringField("path", file.getPath(), Field.Store.YES));
				doc.add(new TextField("title", title, Field.Store.YES));
				doc.add(new LongField("modified", file.lastModified(), Field.Store.NO));
				try {
					writer.addDocument(doc);
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	/** Demo driver: crawl starting from the 163 news front page. */
	public static void main(String[] args) {
		Crawler crawler = new Crawler();
		crawler.crawling(new String[]{"http://news.163.com/"});
	}
}
