package com.doris.lucene;

import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.FSDirectory;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Index searcher: queries the most recent completed Lucene index directory
 * and returns one page of hits with query-term highlighting.
 *
 * @author root
 */
public class LuceneSearcher {

	private static final Logger LOGGER = Logger.getLogger(LuceneSearcher.class);
	private static final String CONTENT = "content";
	/** Maximum size (in characters) of a highlight fragment / fallback snippet. */
	private static final int FRAGMENT_SIZE = 100;
	// IKAnalyzer instances are safe to share across calls; the QueryParser is
	// NOT thread-safe, so it is built per request inside search() instead of
	// being held in a shared static field.
	private static Analyzer analyzer = new IKAnalyzer();

	public LuceneSearcher() {
	}

	/**
	 * Searches the index by keyword and returns the requested result page.
	 *
	 * @param queryWord
	 *            search keyword (Lucene query syntax)
	 * @param pageNum
	 *            1-based page number
	 * @return pager holding the total hit count and the hits of the requested
	 *         page; an empty pager when no completed index exists or the
	 *         query/index access fails
	 */
	public Pager search(String queryWord, int pageNum) {
		LOGGER.info("queryWord=" + queryWord);
		List<LuceneDocument> results = new ArrayList<LuceneDocument>();
		Pager pager = new Pager(0, results);
		String indexDir = getIndexDir();
		if (indexDir == null) {
			// No completed index yet: return the empty pager.
			return pager;
		}
		FSDirectory dir = null;
		DirectoryReader reader = null;
		try {
			dir = FSDirectory.open(new File(indexDir));
			reader = DirectoryReader.open(dir);
			IndexSearcher searcher = new IndexSearcher(reader);
			// QueryParser is not thread-safe; build a fresh one per request.
			Query query = new QueryParser(CONTENT, analyzer).parse(queryWord);
			TopDocs docs = searcher.search(query, null, pageNum * Constants.NUM_PER_PAGE);
			ScoreDoc[] hits = docs.scoreDocs;
			int numTotalHits = docs.totalHits;
			// Clamp to both the reported total and the hits actually returned.
			int end = Math.min(Math.min(numTotalHits, hits.length), pageNum * Constants.NUM_PER_PAGE);
			for (int i = (pageNum - 1) * Constants.NUM_PER_PAGE; i < end; i++) {
				Document doc = searcher.doc(hits[i].doc);
				LuceneDocument result = buildLuceneDocument(query, doc);
				// buildLuceneDocument returns null for malformed documents or
				// highlighting failures; never add null to the result list.
				if (result != null) {
					results.add(result);
				}
			}
			pager.setTotal(numTotalHits);
			pager.setResults(results);
		} catch (ParseException | IOException e) {
			LOGGER.error("索引查询失败", e);
		} finally {
			closeQuietly(reader);
			// The directory was previously leaked; close it as well.
			closeQuietly(dir);
		}
		return pager;
	}

	/**
	 * Closes a resource, logging (rather than propagating) any I/O failure.
	 *
	 * @param closeable
	 *            resource to close; may be {@code null}
	 */
	private void closeQuietly(Closeable closeable) {
		if (closeable == null) {
			return;
		}
		try {
			closeable.close();
		} catch (IOException e) {
			LOGGER.error("索引文件读写失败", e);
		}
	}

	/**
	 * Builds one search hit with a highlighted snippet of the matched content.
	 *
	 * @param query
	 *            parsed query, used to score highlight fragments
	 * @param doc
	 *            matched Lucene document
	 * @return the result record, or {@code null} when the document lacks the
	 *         required fields or highlighting fails
	 * @throws IOException
	 *             if the token stream cannot be read
	 */
	private LuceneDocument buildLuceneDocument(Query query, Document doc) throws IOException {
		String path = doc.get("path");
		String content = doc.get(CONTENT);
		if (isNull(path) || isNull(content)) {
			return null;
		}
		SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<font color='red'>", "</font>");
		Highlighter highlighter = new Highlighter(simpleHTMLFormatter, new SimpleHTMLEncoder(), new QueryScorer(query));
		highlighter.setTextFragmenter(new SimpleFragmenter(FRAGMENT_SIZE));
		// TokenStream is Closeable and must be closed so the analyzer can
		// release/reuse its components; try-with-resources guarantees it.
		try (TokenStream tokenStream = analyzer.tokenStream(CONTENT, new StringReader(content))) {
			String highLightText = highlighter.getBestFragment(tokenStream, content);
			if (highLightText == null) {
				// No fragment matched the query: fall back to a leading snippet.
				highLightText = content.length() > FRAGMENT_SIZE ? content.substring(0, FRAGMENT_SIZE) : content;
			}
			// Normalize Windows path separators for display.
			return new LuceneDocument(doc.get("name"), path.replaceAll("\\\\", "/"), highLightText);
		} catch (InvalidTokenOffsetsException e) {
			LOGGER.error("内容高亮失败", e);
			return null;
		}
	}

	/**
	 * @param word
	 *            string to test
	 * @return {@code true} when the string is {@code null} or empty
	 */
	private boolean isNull(String word) {
		return word == null || word.isEmpty();
	}

	/**
	 * Finds the most recent completed index directory. Index directories are
	 * named by a timestamp ({@code Constants.DATE_TIME_FORMAT}); plain files
	 * and the in-progress "locked" directory are skipped.
	 *
	 * @return path of the newest index directory, or {@code null} when none
	 *         exists
	 */
	private String getIndexDir() {
		String rootDir = LuceneProperties.getIndexDir();
		// listFiles() returns null when rootDir is missing or not a directory;
		// guard against it instead of throwing an NPE.
		File[] children = new File(rootDir).listFiles();
		if (children == null) {
			LOGGER.error("索引根目录不可用: " + rootDir);
			return null;
		}
		Date tmpDate = null;
		for (File file : children) {
			if (file.isFile() || file.equals(new File(LuceneProperties.getLockedDir()))) {
				continue;
			}
			try {
				// parse() throws on failure and never returns null.
				Date date = new SimpleDateFormat(Constants.DATE_TIME_FORMAT).parse(file.getName());
				if (tmpDate == null || date.compareTo(tmpDate) > 0) {
					tmpDate = date;
				}
			} catch (java.text.ParseException e) {
				LOGGER.error("索引目录名不能格式化为日期", e);
			}
		}
		if (tmpDate != null) {
			return rootDir + "/" + new SimpleDateFormat(Constants.DATE_TIME_FORMAT).format(tmpDate);
		}
		return null;
	}

	public static void main(String[] args) {
		LOGGER.info(new LuceneSearcher().getIndexDir());
		Pager pager = new LuceneSearcher().search("亚信", 1);
		for (LuceneDocument luceneDocument : pager.getResults()) {
			LOGGER.info(luceneDocument.getPath());
		}
	}

}
