package org.study.lucene.api.index;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.IOUtils;
import org.study.lucene.api.constant.PathConstants;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.IOException;
import java.nio.file.*;

/**
 * @author Administrator
 * @date 2021-12-23
 */
public class LuceneTest01 {
    public static void main(String[] args) throws Exception {
        // createAndSearch();
        testCreate();
    }

    /**
     * Indexes a single document into a temporary directory, searches it back,
     * and removes the temp directory afterwards.
     *
     * <p>Based on the quick-start example at
     * https://lucene.apache.org/core/10_1_0/core/index.html
     *
     * @throws IOException    if the index cannot be written or read
     * @throws ParseException if the query string cannot be parsed
     */
    public static void createAndSearch() throws IOException, ParseException {
        // ================================================== build the index
        Analyzer analyzer = new StandardAnalyzer();
        String text = "This is the text to be indexed.";

        Path indexPath = Files.createTempDirectory("tempIndex");
        try (Directory directory = FSDirectory.open(indexPath)) {
            IndexWriterConfig config = new IndexWriterConfig(analyzer);
            // try-with-resources guarantees the writer is closed (and its lock
            // released) even if addDocument throws
            try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
                Document document = new Document();
                // TYPE_STORED: tokenized, indexed, and the raw value is kept
                // so it can be read back from the hit below
                document.add(new Field("fieldName", text, TextField.TYPE_STORED));
                indexWriter.addDocument(document);
            }

            // ================================================== search
            try (DirectoryReader reader = DirectoryReader.open(directory)) {
                IndexSearcher indexSearcher = new IndexSearcher(reader);
                // Parse a simple query that searches for "text":
                QueryParser parser = new QueryParser("fieldName", analyzer);
                Query query = parser.parse("text");
                ScoreDoc[] hits = indexSearcher.search(query, 10).scoreDocs;
                // exactly one document was indexed, so exactly one hit is expected
                System.out.println(hits.length == 1);
                // Iterate through the results and verify the stored value round-trips
                StoredFields storedFields = indexSearcher.storedFields();
                for (ScoreDoc hit : hits) {
                    Document hitDoc = storedFields.document(hit.doc);
                    System.out.println(text.equals(hitDoc.get("fieldName")));
                }
            }
        } finally {
            // always clean up the temp directory, even when indexing/search failed
            IOUtils.rm(indexPath);
        }
    }

    /**
     * Indexes one document into the on-disk index at
     * {@link PathConstants#INDEX_PATH}, using the IK analyzer for Chinese
     * tokenization.
     *
     * @throws IOException if the index directory cannot be opened or written
     */
    public static void testCreate() throws IOException {
        // step 1: build the document. Field-constructor args: name, value,
        // whether to store (Store.YES keeps the raw value retrievable,
        // Store.NO indexes only).
        Document document = new Document();
        // LongPoint: enables range queries on "id"
        document.add(new LongPoint("id", 1));
        // NumericDocValuesField: enables sorting on "id"
        document.add(new NumericDocValuesField("id", 1));
        // StoredField: only so the raw "id" value can be read back from hits
        document.add(new StoredField("id", 1));
        // StringField: indexed as a single token, NOT analyzed — good for tags
        document.add(new StringField("tag", "小道消息", Field.Store.YES));
        // TextField: analyzed AND indexed — required for full-text search on title
        document.add(new TextField("title", "谷歌突然宕机三小时", Field.Store.YES));

        // step 2: the index directory on disk.
        // For an in-memory index a RAM-backed Directory could be used instead.
        Path path = Paths.get(PathConstants.INDEX_PATH);
        // step 3: the analyzer. StandardAnalyzer is a poor fit for Chinese, so a
        // third-party tokenizer is used here (IK analyzer).
        Analyzer analyzer = new IKAnalyzer();
        // step 4: writer configuration — append to an existing index or create one
        IndexWriterConfig conf = new IndexWriterConfig(analyzer);
        conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        // steps 5-8: open the writer, add the document, commit, close.
        // try-with-resources closes writer and directory even on failure
        // (closing the writer also commits, but the explicit commit keeps the
        // step visible).
        try (Directory directory = FSDirectory.open(path);
             IndexWriter indexWriter = new IndexWriter(directory, conf)) {
            indexWriter.addDocument(document);
            indexWriter.commit();
        }
    }
}
