package com.gcb.lucene;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Small Lucene 3.0 utility that indexes a directory of GBK-encoded text
 * files and searches the resulting index over the "title" and "content"
 * fields.
 *
 * <p>Not thread-safe: the shared static {@link #analyzer} is reassigned by
 * the constructor.
 */
public class FileIndexer {

	/** Fields queried by {@link #creatQuery(String, Analyzer)}. */
	public static final String[] QUERY_FIELD = {"title", "content"};

	/** One SHOULD clause per query field: a hit in either field matches. */
	public static final BooleanClause.Occur[] QUERY_FLAGS = {
		BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };

	// Shared analyzer for both indexing and searching. Initialized eagerly so
	// fileIndex/fileSearch work even if no FileIndexer was ever constructed
	// (the original left this null until the constructor ran).
	private static Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);

	public FileIndexer() {
		// Standard analyzer; swap in new IKAnalyzer() for Chinese segmentation.
		analyzer = new StandardAnalyzer(Version.LUCENE_30);
		//analyzer = new IKAnalyzer();
	}

	/**
	 * Tokenizes {@code text} with the given analyzer and prints each term.
	 *
	 * @param analyzer the analyzer to exercise
	 * @param text     the text to tokenize
	 * @throws Exception if tokenization fails
	 */
	public static void testAnalyzer(Analyzer analyzer,
			String text) throws Exception {
		System.out.println("当前使用的分词器：" + analyzer.getClass());

		TokenStream tokenStream = analyzer.
			tokenStream("content", new StringReader(text));
		// Register the attribute once and reuse it; getAttribute() inside the
		// loop returned the same instance on every iteration anyway.
		TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class);
		try {
			while (tokenStream.incrementToken()) {
				System.out.println(termAttribute.term());
			}
		} finally {
			tokenStream.close(); // release analysis resources even on failure
		}
	}

	/**
	 * Converts a text file into a Lucene {@link Document} with three fields:
	 * "content" (tokenized from the file body), "title" (file name without
	 * extension, analyzed, not stored) and "path" (canonical path, stored,
	 * not analyzed).
	 *
	 * @param file a GBK-encoded text file
	 * @return the document; may be partially populated if an I/O error occurred
	 */
	public Document createDocument(File file){
		Document doc = new Document();
		try {
			// GBK-encoded source files. The Reader is consumed when the
			// document is indexed by the IndexWriter.
			Reader reader = new InputStreamReader(new FileInputStream(file), "GBK");
			doc.add(new Field("content", reader));
			// Title = file name without its extension. Using lastIndexOf with
			// a guard fixes a StringIndexOutOfBoundsException for names with
			// no '.' (indexOf == -1) and mis-truncation for names containing
			// several dots.
			String name = file.getName();
			int dot = name.lastIndexOf('.');
			String title = (dot > 0) ? name.substring(0, dot) : name;
			doc.add(new Field("title", title,
					Field.Store.NO, Field.Index.ANALYZED));
			doc.add(new Field("path", file.getCanonicalPath(),
					Field.Store.YES, Field.Index.NOT_ANALYZED));
		} catch (IOException e) {
			// Covers FileNotFoundException and UnsupportedEncodingException,
			// both subclasses of IOException.
			e.printStackTrace();
		}
		return doc;
	}

	/**
	 * Builds a query over {@link #QUERY_FIELD} ("title" OR "content").
	 * Method name keeps the historical "creat" spelling for caller
	 * compatibility.
	 *
	 * @param queryString user keyword string
	 * @param analyzer    analyzer used to parse the keywords
	 * @return the parsed query, or {@code null} if parsing failed
	 */
	public Query creatQuery(String queryString, Analyzer analyzer){
		Query query = null;
		try {
			query = MultiFieldQueryParser.parse(Version.LUCENE_30, queryString,
					QUERY_FIELD, QUERY_FLAGS, analyzer);
		} catch (ParseException e) {
			e.printStackTrace();
		}
		return query;
	}

	/**
	 * (Re)builds the index at {@code indexDir} from every file directly under
	 * {@code dataDir}. The index is created from scratch on each call.
	 *
	 * @param indexDir index directory; defaults to F:\indexDir when null
	 * @param dataDir  directory whose files are indexed; ignored when empty
	 */
	public void fileIndex(File indexDir, File dataDir){
		if(indexDir == null)
			indexDir = new File("F:\\indexDir");
		if(!indexDir.exists()){
			indexDir.mkdirs();
		}
		File[] files = dataDir.listFiles();
		if(files == null || files.length == 0){
			return;
		}
		long start_time = System.currentTimeMillis();
		IndexWriter indexWriter = null;
		try {
			FSDirectory dir = FSDirectory.open(indexDir);
			// create=true: any existing index at this location is replaced.
			indexWriter = new IndexWriter(dir, analyzer, true,
					IndexWriter.MaxFieldLength.LIMITED);
			for (int i = 0; i < files.length; i++) {
				indexWriter.addDocument(createDocument(files[i]));
			}
			indexWriter.optimize();
			System.out.println("总共花费" + (System.currentTimeMillis()-start_time) + "毫秒");
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Close in finally so the index write lock is released even when
			// an exception interrupts the indexing loop.
			if (indexWriter != null) {
				try {
					indexWriter.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Searches the index for {@code queryString} and returns the stored
	 * "path" field of the top hits.
	 *
	 * @param indexDir    index location; defaults to F:\indexDir when null
	 * @param queryString keyword string
	 * @param queryCount  maximum number of hits to return
	 * @return list of matching file paths; empty when nothing matched or an
	 *         error occurred (never null)
	 */
	public List<String> fileSearch(File indexDir, String queryString, int queryCount){
		List<String> list = new ArrayList<String>();
		if(indexDir == null)
			indexDir = new File("F:\\indexDir");
		IndexSearcher indexSearcher = null;
		try {
			long start_time = System.currentTimeMillis();
			Directory dir = FSDirectory.open(indexDir);
			indexSearcher = new IndexSearcher(dir);
			Query query = creatQuery(queryString, analyzer);
			// creatQuery returns null on a parse failure; searching with a
			// null query would NPE.
			if (query != null) {
				TopDocs topDocs = indexSearcher.search(query, queryCount);
				ScoreDoc[] hits = topDocs.scoreDocs;
				for (int i = 0; i < hits.length; i++) {
					Document doc = indexSearcher.doc(hits[i].doc);
					list.add(doc.get("path"));
				}
			}
			System.out.println("总共花费" + (System.currentTimeMillis()-start_time) + "毫秒");
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Close in finally so the reader is released on the error path too.
			if (indexSearcher != null) {
				try {
					indexSearcher.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
		return list;
	}

}
