package com.lagou;

import com.lagou.bean.Book;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.queryparser.xml.builders.BooleanQueryBuilder;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.junit.Test;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

/**
 * @author yx
 * @date 2024-04-07 13:49
 * @description: Lucene examples: building an index from sample Book data and
 *               querying it (QueryParser, BooleanQuery).
 */
public class test {

    @Test
    public void testCreateIndex() throws IOException {
        // 1. 采集数据
        List<Book> bookList = new ArrayList<Book>();
        Book booka = new Book();
        booka.setId(1);
        booka.setDesc("Lucene Core is a Java library providing powerful indexing and search features, as well as spellchecking, hit highlighting and advanced analysis / tokenization capabilities.The PyLucene sub project provides Python bindings for Lucene Core.");
        booka.setName("Lucene");
        booka.setPrice(100.45f);
        bookList.add(booka);
        Book bookb = new Book();
        bookb.setId(11);
        bookb.setDesc("Solr is highly scalable, providing fully fault tolerant distributed indexing, search and analytics.It exposes Lucene 's features through easy to use JSON/HTTP interfaces or native clients for Java and other languages. ");
        bookb.setName("Solr");
        bookb.setPrice(320.45f);
        bookList.add(bookb);
        Book bookc = new Book();
        bookc.setId(21);
        bookc.setDesc("The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across clusters of computers using simple programming models.");
        bookc.setName("Hadoop");
        bookc.setPrice(620.45f);
        bookList.add(bookc);
        //2.创建文档
        List<Document> documentList = new ArrayList<>(bookList.size());
        for (Book book : bookList) {
            Document document = new Document();
            document.add(new TextField("id", book.getId().toString(), Field.Store.YES));
            document.add(new TextField("desc", book.getDesc(), Field.Store.YES));
            document.add(new TextField("name", book.getName(), Field.Store.YES));
            document.add(new TextField("price", book.getPrice().toString(), Field.Store.YES));
            documentList.add(document);
        }
        //3.创建分词器
        Analyzer analyzer = new StandardAnalyzer();
        //创建字典，声明索引库所在的位置
        Directory directory = FSDirectory.open(Paths.get("/Users/yx/Downloads/lucene/index"));
        //4.创建索引写入配置类和索引写入对象
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
        IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);
        for (Document document : documentList) {
            indexWriter.addDocument(document);
        }
        //5.释放资源
        indexWriter.close();
    }

    @Test
    public void testQueryIndex() throws Exception {
        //1.创建query搜索对象
        //创建分词器
        Analyzer analyzer = new StandardAnalyzer();
        //创建搜索解析器
        QueryParser queryParser = new QueryParser("id", analyzer);
        // 创建搜索对象
        Query query = queryParser.parse("desc:java OR name:solr");
        //2. 创建Directory流对象,声明索引库位置
        Directory directory = FSDirectory.open(Paths.get("/Users/yx/Downloads/lucene/index"));
        //3.创建索引读取对象
        IndexReader indexReader = DirectoryReader.open(directory);
        //4.创建索引搜索对象
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        System.out.println(indexSearcher.getSimilarity(false));
        System.out.println(indexSearcher.getSimilarity(true));
        indexSearcher.setSimilarity(indexSearcher.getSimilarity(true));
        //5.使用索引搜索对象，执行搜索，返回结果集TopDocs
        TopDocs topDocs = indexSearcher.search(query, 10);
        System.out.println("查询出来的总条数:" + topDocs.totalHits);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            System.out.println("scoreDoc:" + scoreDoc);
            int docId = scoreDoc.doc;
            Document doc = indexSearcher.doc(docId);
            System.out.println(doc);
            System.out.println("=============================");
            System.out.println("score:" + scoreDoc.score);
            System.out.println("docID:" + docId);
            System.out.println("bookId:" + doc.get("id"));
            System.out.println("name:" + doc.get("name"));
            System.out.println("price:" + doc.get("price"));
            System.out.println("desc:" + doc.get("desc"));
        }
        //释放资源
        indexReader.close();
    }

    public void booleanQuery(){
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(null, BooleanClause.Occur.MUST);
    }

    public void testStandardQuery() throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        QueryParser queryParser = new QueryParser("desc", analyzer);
        Query query = queryParser.parse("java");
    }
}
