package org.wltea.analyzer.sample;

import java.io.IOException;
import java.io.StringReader;
import java.nio.file.FileSystems;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Job (intended to be scheduled hourly) that rebuilds the Lucene index for
 * scene lecture data, plus a demo search that prints highlighted hits.
 *
 * @author uin
 */
public class SceneLectureLuceneIndexCreateJob {

  /** Filesystem location of the Lucene index; shared by indexing and search. */
  private static final String INDEX_PATH = "D:\\index\\";

  /**
   * Job entry point: rebuilds the index, containing any failure so a
   * scheduler invoking this job is not killed by an exception.
   */
  public void execute() {
    try {
      createIndex();
    } catch (Exception e) {
      // Boundary catch: report the failure and keep the caller alive.
      System.err.println("Failed to create Lucene index: " + e.getMessage());
      e.printStackTrace();
    }
  }

  /**
   * Rebuilds the index from scratch: wraps the source data in {@link Document}
   * objects, wipes any previously indexed content, and writes the documents
   * using the IK analyzer (smart mode) for Chinese text.
   *
   * @throws IOException if the index directory cannot be opened or written
   */
  public void createIndex() throws IOException {

    // Wrap the collected data in Document objects.
    List<Document> docList = new ArrayList<Document>();
    Document document = new Document();

    // Field.Store.YES: the raw value is stored so it can be retrieved at search time.
    Field content = new TextField("content", "这是一条测试数据", Field.Store.YES);
    document.add(content);
    docList.add(document);

    // IK analyzer in smart mode; the SAME analyzer must be used again when
    // searching, otherwise query terms will not line up with indexed terms.
    Analyzer analyzer = new IKAnalyzer(true);
    IndexWriterConfig cfg = new IndexWriterConfig(analyzer);

    // Since JDK 1.7, FSDirectory.open only accepts a Path.
    // try-with-resources guarantees the directory and writer are closed
    // (and the writer's changes committed) even if indexing fails midway.
    try (Directory directory =
            FSDirectory.open(FileSystems.getDefault().getPath(INDEX_PATH));
        IndexWriter writer = new IndexWriter(directory, cfg)) {
      writer.deleteAll(); // discard any previously indexed documents
      for (Document doc : docList) {
        writer.addDocument(doc);
      }
    }
  }

  /**
   * Searches the "content" field for the keyword "真的" and prints each hit
   * with an HTML-highlighted best fragment.
   *
   * @throws ParseException if the Lucene query string cannot be parsed
   * @throws InvalidTokenOffsetsException if highlighting encounters invalid
   *     token offsets
   */
  public void indexSearch() throws ParseException, InvalidTokenOffsetsException {
    // The search-time analyzer must match the index-time analyzer (IK),
    // otherwise Chinese terms produced here will not match the index.
    Analyzer analyzer = new IKAnalyzer(true);
    // First argument: default field searched when the query names none.
    QueryParser parser = new QueryParser("content", analyzer);

    // Lucene query syntax (boolean operators such as AND/OR must be upper-case).
    Query query = parser.parse("content:真的");

    QueryScorer scorer = new QueryScorer(query);
    // Fragmenter extracts a scored snippet containing the query terms.
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    // Wrap matched terms in red bold markup (the no-arg ctor would just bold them).
    SimpleHTMLFormatter simpleHTMLFormatter =
        new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
    highlighter.setTextFragmenter(fragmenter);

    // Since JDK 1.7, FSDirectory.open only accepts a Path.
    // try-with-resources closes the reader/directory on every exit path.
    try (Directory directory =
            FSDirectory.open(FileSystems.getDefault().getPath(INDEX_PATH));
        IndexReader reader = DirectoryReader.open(directory)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      // Second argument: maximum number of top-scoring hits to return.
      TopDocs topDocs = searcher.search(query, 10);

      // Total number of documents matching the query.
      int count = topDocs.totalHits;
      System.out.println("匹配出的记录总数:" + count);

      for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        // Fetch the stored document by its internal doc id.
        Document doc = searcher.doc(scoreDoc.doc);
        String content = doc.get("content");
        System.out.println("查询内容：" + content);
        // Re-tokenize the stored text so the highlighter can locate term offsets.
        TokenStream tokenStream =
            analyzer.tokenStream("content", new StringReader(content));
        // Print the highest-scoring highlighted fragment.
        System.out.println(highlighter.getBestFragment(tokenStream, content));
        System.out.println("==========================");
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
  }

  public static void main(String[] args) {
    SceneLectureLuceneIndexCreateJob s = new SceneLectureLuceneIndexCreateJob();
    s.execute();
  }

}
