package com.ada.ilucene;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.nio.file.Path;
import java.nio.file.Paths;


import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

/**
 * Demo: runs a {@code TermQuery} for "text" in field "fieldname" against the
 * index at {@code D:\luncene}, prints the raw stored field values, then prints
 * each hit again with matched terms wrapped in {@code <B>...</B>} using the
 * Lucene highlighter.
 */
public class TestHighlighter {
  public static void main(String[] args) throws IOException, InvalidTokenOffsetsException {
    // NOTE(review): path literal "luncene" looks like a typo for "lucene",
    // but it is runtime data — confirm against the actual index location.
    Path path = Paths.get("D:\\luncene");

    // Directory, DirectoryReader and Analyzer are all AutoCloseable; the
    // original leaked all three. try-with-resources closes them in reverse
    // order even if searching or highlighting throws.
    try (Directory directory = FSDirectory.open(path);
         DirectoryReader ireader = DirectoryReader.open(directory);
         Analyzer analyzer = new StandardAnalyzer()) {

      IndexSearcher search = new IndexSearcher(ireader);
      Term term = new Term("fieldname", "text");
      Query query = new TermQuery(term);
      TopDocs topDocs = search.search(query, 10);
      ScoreDoc[] hits = topDocs.scoreDocs;

      // Plain (un-highlighted) results, as produced by a normal search.
      for (ScoreDoc hit : hits) {
        Document doc = search.doc(hit.doc);
        System.out.println(doc.get("fieldname") + ":");
      }

      // Highlight setup: wrap each matched term in the <B>...</B> pre/post tags.
      SimpleHTMLFormatter simpleHtmlFormatter = new SimpleHTMLFormatter("<B>", "</B>");
      Highlighter highlighter = new Highlighter(simpleHtmlFormatter, new QueryScorer(query));
      // Return at most 150 characters per fragment — show a snippet, not the
      // whole stored value (search-engine style).
      highlighter.setTextFragmenter(new SimpleFragmenter(150));

      for (ScoreDoc hit : hits) {
        Document doc = search.doc(hit.doc);
        String text = doc.get("fieldname");
        if (text == null) {
          // Field not stored for this document; the original would have
          // thrown NPE from new StringReader(null).
          continue;
        }
        // Pass the real field name (the original passed "") so any per-field
        // analyzer configuration is honored. getBestFragment() consumes and
        // closes the TokenStream itself.
        TokenStream tokenStream = analyzer.tokenStream("fieldname", new StringReader(text));
        String fragment = highlighter.getBestFragment(tokenStream, text);
        System.out.println(fragment);
      }
    }
  }
}
