package com.zhaochao.demo;

import java.io.IOException;
import java.io.StringReader;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.Scorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Version;

/**
 * Data-access object for the article full-text index.
 *
 * <p>All index mutations go through the shared {@code IndexWriter} obtained from
 * {@link LuceneUtils} and are committed immediately so readers see the change.
 * Any checked exception is rethrown as {@link RuntimeException} with its cause
 * preserved.
 */
public class ArticleIndexDao {

	/**
	 * Adds an article to the index.
	 *
	 * @param article the article to index
	 */
	public void save(Article article) {
		// 1. Convert the Article into a Lucene Document.
		Document doc = ArticleDocumentUtils.articleToDocument(article);
		// 2. Add it to the index and commit so the change is visible to searchers.
		try {
			LuceneUtils.getIndexWriter().addDocument(doc);
			LuceneUtils.getIndexWriter().commit();
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Deletes from the index every document whose "id" field contains the
	 * given value. (A Term is a single keyword occurring in a named field.)
	 *
	 * @param id the article id to delete
	 */
	public void delete(String id) {
		try {
			Term term = new Term("id", id);
			// Removes all documents containing this Term, then commits.
			LuceneUtils.getIndexWriter().deleteDocuments(term);
			LuceneUtils.getIndexWriter().commit();
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Updates the indexed form of an article. Lucene implements update as
	 * delete-by-term followed by add.
	 *
	 * <p>NOTE(review): if the id field was indexed as a numeric value, the
	 * String passed here must match how it was written — confirm against
	 * {@code ArticleDocumentUtils.articleToDocument}.
	 *
	 * @param article the article whose index entry should be replaced
	 */
	public void update(Article article) {
		try {
			Term term = new Term("id", article.getId());
			Document doc = ArticleDocumentUtils.articleToDocument(article);
			LuceneUtils.getIndexWriter().updateDocument(term, doc);
			LuceneUtils.getIndexWriter().commit();
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Paginated search over the "title" and "content" fields, with the query
	 * terms highlighted in the returned articles.
	 *
	 * @param queryString the keyword string; may be null or blank, in which
	 *            case only {@code termQuery} constrains the result
	 * @param first index of the first hit to return (0-based)
	 * @param max maximum number of hits to return (fewer if not enough match)
	 * @param termQuery optional extra mandatory filter; may be null
	 * @return one page of matching articles plus the total hit count
	 */
	public QueryResult search(String queryString, int first, int max, TermQuery termQuery) {
		try {
			BooleanQuery booleanQuery = new BooleanQuery();

			// 1. Only parse the keyword string when it is actually non-blank.
			// The original code called parse(queryString) unconditionally,
			// which throws NPE for null and ParseException for "" — the
			// emptiness guard below it could never take effect.
			if (queryString != null && !queryString.trim().isEmpty()) {
				QueryParser queryParser = new MultiFieldQueryParser(
						new String[] { "title", "content" }, LuceneUtils.getAnalyzer());
				Query query = queryParser.parse(queryString);
				booleanQuery.add(query, Occur.MUST);
			}
			if (termQuery != null) {
				booleanQuery.add(termQuery, Occur.MUST);
			}

			// 2. Execute the query. Ask for the top (first + max) hits so the
			// requested page is fully contained in the returned score docs.
			IndexReader reader = LuceneUtils.getIndexReader();
			IndexSearcher indexSearcher = new IndexSearcher(reader);
			TopDocs topDocs = indexSearcher.search(booleanQuery, first + max);
			int count = topDocs.totalHits; // total number of matching documents

			// 3. Convert the slice [first, endIndex) into Article objects.
			List<Article> list = new ArrayList<Article>();
			int endIndex = Math.min(first + max, topDocs.scoreDocs.length);
			for (int i = first; i < endIndex; i++) {
				// Resolve the internal doc id to the stored Document.
				int docId = topDocs.scoreDocs[i].doc;
				Document doc = indexSearcher.doc(docId);

				Article article = ArticleDocumentUtils.documentToArticle(doc);
				article.setTitleHightLight(getHighlighter(booleanQuery, doc, "title"));
				article.setContentHightLight(getHighlighter(booleanQuery, doc, "content"));
				list.add(article);
			}

			// 4. Package the page together with the total count.
			return new QueryResult(list, count);
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Returns the stored value of {@code field} with query matches wrapped in
	 * {@code <font color=red>...</font>} tags.
	 *
	 * <p>Fix: {@code Highlighter.getBestFragment} returns {@code null} when the
	 * query matches nothing in this field; the original propagated that null
	 * into the article's highlight fields. We now fall back to the raw text.
	 *
	 * @param query the query whose terms should be highlighted
	 * @param doc the document providing the field text
	 * @param field the stored field name ("title" or "content")
	 * @return the highlighted fragment, or the plain field value if no
	 *         fragment was produced
	 */
	private String getHighlighter(Query query, Document doc, String field)
			throws IOException, InvalidTokenOffsetsException {
		String text = doc.get(field);

		QueryScorer queryScorer = new QueryScorer(query);
		// HTML tags wrapped around each matched term.
		Formatter formatter = new SimpleHTMLFormatter("<font color=red>", "</font>");
		Highlighter highlighter = new Highlighter(formatter, queryScorer);
		highlighter.setTextFragmenter(new SimpleSpanFragmenter(queryScorer));

		String fragment = highlighter.getBestFragment(LuceneUtils.getAnalyzer(), field, text);
		return fragment != null ? fragment : text;
	}

}