package com.study.lucene;

import cn.hutool.core.io.IoUtil;
import cn.hutool.core.util.StrUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.util.List;

/**
 * @author zhuzi
 * 2023/8/12 14:45
 * Lucene
 */
@Slf4j
public class Demo {

    /** Directory holding the product index built by {@link #createIndex(Analyzer)}. */
    public static final String IDX_DIR = "/Users/zhuzi/Downloads/tmp/idx";

    /** CSV source file; columns: id,name,brand_name,cate_name,gmt_create,score,price. */
    public static final String filePath = "/Users/zhuzi/Downloads/tmp/index.txt";

    /** Directory holding the single-article index used by the highlight demos. */
    public static final String ARTICLE_IDX_DIR = "/Users/zhuzi/Downloads/tmp/idxarticle";
    /** Plain-text article indexed by {@link #createArticleIndex()}. */
    public static final String ARTICLE_PATH = "/Users/zhuzi/workspace/soft/ideatool/idea/javastudy/springboot-validation/src/main/resources/news.txt";

    FSDirectory fsDirectory;
    DirectoryReader ir;
    IndexSearcher searcher;

    // Handles for the article index (best-fragment / highlight demos).
    FSDirectory articleFsDirectory;
    DirectoryReader articleIr;
    IndexSearcher articleSearcher;

    @BeforeEach
    public void setUp() throws IOException {
        fsDirectory = FSDirectory.open(Paths.get(IDX_DIR));
        // Point-in-time reader over the on-disk index.
        ir = DirectoryReader.open(fsDirectory);
        // Roughly analogous to a JDBC connection: the entry point for queries.
        searcher = new IndexSearcher(ir);

        articleFsDirectory = FSDirectory.open(Paths.get(ARTICLE_IDX_DIR));
        // If building the article index fails, comment out these two lines first.
        articleIr = DirectoryReader.open(articleFsDirectory);
        articleSearcher = new IndexSearcher(articleIr);
    }

    @AfterEach
    public void after() throws IOException {
        // Single owner of the directory/reader lifecycle; the test methods
        // themselves no longer close these (avoids double-close).
        ir.close();
        fsDirectory.close();
        IoUtil.close(articleIr);
        IoUtil.close(articleFsDirectory);
    }

    /**
     * Builds the product index with the StandardAnalyzer.
     * Glossary: a segment holds many documents; a Document is one record made of
     * fields; an analyzer splits text into terms (e.g. "休闲" -> "休", "闲");
     * .cfs/.cfe are the compound index files.
     */
    @Test
    public void test() throws IOException {
        Analyzer analyzer = new StandardAnalyzer();
        createIndex(analyzer);
    }

    /**
     * Reads the CSV source file and (re)builds the product index.
     *
     * @param analyzer index-time analyzer; queries must use the same analyzer
     * @throws IOException on index or file I/O failure
     */
    private void createIndex(Analyzer analyzer) throws IOException {
        IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
        // CREATE: always wipe and rebuild the index from scratch.
        writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        // Pack segment files into compound (.cfs/.cfe) files.
        writerConfig.setUseCompoundFile(true);

        // try-with-resources: IndexWriter.close() flushes and commits; the
        // directory itself is closed exactly once, in after().
        try (IndexWriter writer = new IndexWriter(fsDirectory, writerConfig)) {
            List<String> lines = FileUtils.readLines(new File(filePath), StandardCharsets.UTF_8);

            for (String line : lines) {
                // Skip the CSV header row.
                if (line.startsWith("id")) {
                    continue;
                }
                // id,name,brand_name,cate_name,gmt_create,score,price
                String[] split = line.split(",");
                String id = split[0];
                String name = split[1];
                String brandName = split[2];
                String cateName = split[3];
                String gmtCreate = split[4];
                String score = split[5];
                String price = split[6];

                Document document = new Document();

                // Point fields (IntPoint/LongPoint/FloatPoint) are indexed for
                // range/exact queries but are neither analyzed nor stored.
                document.add(new IntPoint("id", Integer.parseInt(id)));
                // StringField indexes the raw value as ONE term; this is the
                // update key used by updateIndex().
                document.add(new StringField("idStr", id, Field.Store.NO));
                // StoredField keeps the original value retrievable from hits.
                document.add(new StoredField("id_stored", Integer.valueOf(id)));
                // TextField is analyzed and goes into the inverted index.
                document.add(new TextField("name", name, Field.Store.YES));
                document.add(new TextField("brand_name", brandName, Field.Store.YES));

                // StringField is NOT analyzed.
                document.add(new StringField("cate_name", cateName, Field.Store.YES));

                document.add(new LongPoint("gmt_create", Long.parseLong(gmtCreate)));
                document.add(new StoredField("gmt_create_stored", Long.parseLong(gmtCreate)));

                document.add(new FloatPoint("score", Float.parseFloat(score)));
                document.add(new StoredField("score_stored", Float.parseFloat(score)));
                document.add(new IntPoint("price", Integer.parseInt(price)));
                document.add(new StoredField("price_stored", Integer.parseInt(price)));

                writer.addDocument(document);
            }

            System.out.println(StrUtil.format("提交了[{}]条数据", writer.numDocs()));
        }
    }

    /**
     * TermQuery: the keyword is matched verbatim, with no further analysis.
     */
    @Test
    public void termQuery() throws IOException {
        // Query term and the field to search.
        Term term = new Term("name", "耐");
        TermQuery termQuery = new TermQuery(term);
        searchResult(searcher, termQuery);
    }

    /**
     * Runs the query (top 100 hits) and prints each hit's stored fields.
     */
    private static void searchResult(IndexSearcher searcher, Query termQuery) throws IOException {
        TopDocs topDocs = searcher.search(termQuery, 100);

        System.out.println(StrUtil.format("query语句[{}]共搜索到[{}]条", termQuery.toString(), topDocs.totalHits));
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            // docId is assigned by Lucene itself — it is NOT the IntPoint "id" field.
            int doc = scoreDoc.doc;
            Document document = searcher.doc(doc);
            System.out.println(
                    StrUtil.format("docId:[{}]评分[{}]" +
                                    ",id:{} ,name:{} ,brand_name:{} ,cate_name:{}" +
                                    ",gmt_create:{} ,score:{} ,price:{}", doc, scoreDoc.score,
                            document.get("id_stored"),
                            document.get("name"),
                            document.get("brand_name"),
                            document.get("cate_name"),
                            document.get("gmt_create_stored"),
                            document.get("score_stored"),
                            document.get("price_stored")
                    ));
        }
    }

    /**
     * Prints the terms an analyzer produces for a string — no index involved.
     *
     * @throws IOException on token stream failure
     */
    @Test
    public void terms() throws IOException {
        String str = "耐克2020夏款休闲运动鞋空军一号CNe866";
        StandardAnalyzer analyzer = new StandardAnalyzer();
        try (TokenStream tokenStream = analyzer.tokenStream("耐克", str)) {
            // addAttribute: the stream fills this attribute as it advances.
            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            // reset() must precede the first incrementToken().
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                System.out.println(charTermAttribute);
            }
            tokenStream.end();
        }
    }

    /**
     * Enumerates all indexed terms of the "name" field straight from the index.
     *
     * @throws IOException on index read failure
     */
    @Test
    public void termBYIndex() throws IOException {
        // Reuse the reader opened in setUp() — the original opened a second
        // DirectoryReader here and never closed it (reader leak).
        Terms tr = MultiFields.getTerms(ir, "name");
        TermsEnum iterator = tr.iterator();
        BytesRef bytesRef;
        while ((bytesRef = iterator.next()) != null) {
            String openTermStr = bytesRef.utf8ToString();
            System.out.println("term值:" + openTermStr + "在" + iterator.docFreq() + "个文档中出现的频率");
        }
    }

    /**
     * Builds the product index with the bundled Chinese analyzer.
     */
    @Test
    public void smartCn() throws IOException {
        SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
        createIndex(analyzer);
    }

    /**
     * IKSegmenter demo. useSmart=true selects smart segmentation; false selects
     * fine-grained segmentation.
     */
    @Test
    public void ikAnalyzer() throws IOException {
        String str = "雅诗兰黛小棕瓶面部修复精华露100ml";
        //str = "乐之者Java的网址是http://www.lzj.com;提供原创Java视频";
        IKSegmenter segmenter = new IKSegmenter(new StringReader(str), true);

        // A Lexeme is IK's equivalent of a term.
        Lexeme next;
        while ((next = segmenter.next()) != null) {
            System.out.println(next.getLexemeText());
        }
    }

    /**
     * Builds the product index with IKAnalyzer.
     * NOTE(review): older IK versions throw AbstractMethodError /
     * "TokenStream implementation classes or at least their incrementToken()
     * implementation must be final" — fix by marking the offending class final
     * or by using a compatible IK build.
     */
    @Test
    public void testIkAnalyzerIndex() throws IOException {
        IKAnalyzer analyzer = new IKAnalyzer(true);
        createIndex(analyzer);
    }

    /**
     * QueryParser search. The query-time analyzer MUST match the one used at
     * build time.
     */
    @Test
    public void queryParse() throws IOException, ParseException {
        IKAnalyzer analyzer = new IKAnalyzer(true);
        QueryParser queryParser = new QueryParser("name", analyzer);
        // Whitespace between clauses means AND instead of the default OR.
        queryParser.setDefaultOperator(QueryParser.Operator.AND);
        // Allow a leading wildcard (e.g. "*瓶").
        queryParser.setAllowLeadingWildcard(true);
        Query query = queryParser.parse("雅诗兰黛 小棕瓶");
        searchResult(searcher, query);
    }

    /**
     * BooleanQuery search: both terms are required (MUST + MUST).
     */
    @Test
    public void booleanQuery() throws Exception {
        TermQuery query = new TermQuery(new Term("name", "手机"));
        TermQuery query1 = new TermQuery(new Term("name", "华为"));
        BooleanQuery booleanQuery = new BooleanQuery.Builder()
                .add(query, BooleanClause.Occur.MUST)
                .add(query1, BooleanClause.Occur.MUST)
                .build();
        searchResult(searcher, booleanQuery);
    }

    /**
     * Numeric (point) queries: range and exact match.
     */
    @Test
    public void intQuery() throws Exception {
        // Range query over the price point field.
        Query price = IntPoint.newRangeQuery("price", 500, 200000);
        searchResult(searcher, price);

        // Exact numeric match — no analysis involved.
        Query newExactQuery = IntPoint.newExactQuery("id", 4);
        searchResult(searcher, newExactQuery);
    }

    /**
     * Prefix query: category names starting with "乐".
     */
    @Test
    public void prefixQuery() throws Exception {
        PrefixQuery prefixQuery = new PrefixQuery(new Term("cate_name", "乐"));
        searchResult(searcher, prefixQuery);
    }

    /**
     * Builds the single-document article index that keyWord() uses for
     * best-fragment extraction and highlighting.
     */
    @Test
    public void createArticleIndex() throws Exception {
        IKAnalyzer analyzer = new IKAnalyzer(true);
        IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
        // Always wipe and rebuild.
        writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        writerConfig.setUseCompoundFile(true);

        // Use the directory already opened by setUp(); the original re-opened
        // it here and leaked the earlier instance. close() commits; after()
        // closes the directory.
        try (IndexWriter writer = new IndexWriter(articleFsDirectory, writerConfig)) {
            String content = FileUtils.readFileToString(new File(ARTICLE_PATH), StandardCharsets.UTF_8);
            Document document = new Document();
            document.add(new TextField("content", content, Field.Store.YES));

            writer.addDocument(document);
            System.out.println(StrUtil.format("提交了[{}]条数据", writer.numDocs()));
        }
    }

    /**
     * Best-fragment extraction with HTML highlighting of the query terms.
     */
    @Test
    public void keyWord() throws Exception {
        IKAnalyzer ikAnalyzer = new IKAnalyzer(true);
        QueryParser queryParser = new QueryParser("content", ikAnalyzer);
        Query query = queryParser.parse("大熊猫");

        TopDocs topDocs = articleSearcher.search(query, 100);
        System.out.println(StrUtil.format("query语句[{}]共搜索到[{}]条", query.toString(), topDocs.totalHits));
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;

        // Scores fragments by how well they match the query.
        QueryScorer queryScorer = new QueryScorer(query);
        // Split the text into ~150-char fragments aligned to query spans.
        SimpleSpanFragmenter fragmenter = new SimpleSpanFragmenter(queryScorer, 150);

        // FIX: the opening tag was missing its closing '>' ("<b><font color='red'"),
        // producing broken highlight HTML.
        SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
        Highlighter highlighter = new Highlighter(formatter, queryScorer);
        highlighter.setTextFragmenter(fragmenter);

        for (ScoreDoc scoreDoc : scoreDocs) {
            // docId is assigned by Lucene itself — not the IntPoint "id" field.
            int doc = scoreDoc.doc;
            Document document = articleSearcher.doc(doc);
            // FIX: original format "docId:[{}{}]..." jammed docId and score together.
            System.out.println(
                    StrUtil.format("docId:[{}]评分[{}]文章内容[{}]", doc, scoreDoc.score, document.get("content")));

            // Highlighted best fragment of the article.
            String bestFragment = highlighter.getBestFragment(ikAnalyzer, "content", document.get("content"));
            System.out.println("最佳摘要" + bestFragment);
        }
    }

    /**
     * Deletes documents by analyzed term and by exact numeric id, then purges
     * the deletions from the segments.
     */
    @Test
    public void delIndex() throws IOException {
        IKAnalyzer analyzer = new IKAnalyzer(true);
        IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
        // close() commits; after() closes the directory (no double close).
        try (IndexWriter writer = new IndexWriter(fsDirectory, writerConfig)) {
            // Delete by term in the analyzed "name" field.
            writer.deleteDocuments(new Term("name", "雅诗兰黛"));

            // Delete by exact match on the numeric "id" point field.
            Query query = IntPoint.newExactQuery("id", 2);
            writer.deleteDocuments(query);
            writer.forceMergeDeletes();
        }
    }

    /**
     * Replaces the document whose idStr == "2".
     * updateDocument is atomic: delete-by-term then add.
     */
    @Test
    public void updateIndex() throws IOException {
        IKAnalyzer analyzer = new IKAnalyzer(true);
        IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
        try (IndexWriter writer = new IndexWriter(fsDirectory, writerConfig)) {
            Document document = new Document();

            // Point fields are indexed for numeric queries, not analyzed.
            document.add(new IntPoint("id", Integer.parseInt("2")));
            // Single-term key used as the update key below.
            document.add(new StringField("idStr", "2", Field.Store.NO));
            document.add(new StoredField("id_stored", Integer.valueOf("2")));
            // TextField is analyzed into the inverted index.
            document.add(new TextField("name", "雅诗兰黛小棕瓶修复面部精华露100m1", Field.Store.YES));
            document.add(new TextField("brand_name", "雅诗兰黛/EsteeLauder", Field.Store.YES));
            // StringField is NOT analyzed.
            document.add(new StringField("cate_name", "彩妆", Field.Store.YES));
            document.add(new LongPoint("gmt_create", Long.parseLong("1596214173516")));
            // FIX: stored copy was missing, so searchResult() printed null for
            // gmt_create on the updated document.
            document.add(new StoredField("gmt_create_stored", Long.parseLong("1596214173516")));
            document.add(new FloatPoint("score", Float.parseFloat("300")));
            document.add(new StoredField("score_stored", Float.parseFloat("300")));
            document.add(new IntPoint("price", Integer.parseInt("50000")));
            document.add(new StoredField("price_stored", Integer.parseInt("50000")));

            // FIX: "id" is an IntPoint and is never indexed as the string term
            // "2", so Term("id","2") matched nothing — the "update" deleted no
            // document and merely appended a duplicate. The StringField "idStr"
            // exists precisely as the update key.
            writer.updateDocument(new Term("idStr", "2"), document);
            writer.forceMergeDeletes();
        }
    }
}
