package com.lanou.shunke.admin;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.Arrays;

public class LuceneDemo {
    /** Directory containing the plain-text files to index. */
    private static final String DATA_DIR = "D:/data";
    /** Directory where the Lucene index is stored. */
    private static final String INDEX_DIR = "D:/index";
    /** Stop words removed at both index and query time; the two phases must agree. */
    private static final String[] STOP_WORDS = {",", ".", "。", "的", "了", "呢"};

    /**
     * Builds the Chinese-aware analyzer used for BOTH indexing and searching.
     * Centralized so the stop-word configuration cannot drift between the two phases.
     */
    private static Analyzer buildAnalyzer() {
        CharArraySet stopWords = new CharArraySet(Arrays.asList(STOP_WORDS), true);
        return new SmartChineseAnalyzer(stopWords);
    }

    /**
     * Builds the index: every regular file under {@link #DATA_DIR} becomes one document
     * with a "title" field (file name) and a "content" field (file text, UTF-8).
     *
     * @throws IOException if the data directory cannot be listed, a file cannot be read,
     *                     or the index directory cannot be written
     */
    @Test
    public void index() throws IOException {
        Analyzer analyzer = buildAnalyzer();
        // try-with-resources: an unclosed IndexWriter leaves the index write-lock held,
        // so both the directory and the writer must be released even on failure.
        try (Directory directory = FSDirectory.open(Paths.get(INDEX_DIR));
             IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))) {

            File[] files = new File(DATA_DIR).listFiles();
            if (files == null) {
                // listFiles() returns null when the path does not exist or is not a directory
                throw new IOException("Cannot list data directory: " + DATA_DIR);
            }
            for (File file : files) {
                if (!file.isFile()) {
                    continue;
                }
                Document document = new Document();
                // Stored so search results can display it.
                document.add(new TextField("title", file.getName(), Field.Store.YES));
                // Stored as well: search() prints this field, and with Store.NO it
                // would always come back null from doc.get("content").
                document.add(new TextField("content",
                        FileUtils.readFileToString(file, "UTF-8"), Field.Store.YES));
                indexWriter.addDocument(document);
            }
        }
    }

    /**
     * Searches the index across the "title" and "content" fields and prints the
     * top hits with their scores.
     *
     * @throws IOException    if the index cannot be opened or read
     * @throws ParseException if the query string cannot be parsed
     */
    @Test
    public void search() throws IOException, ParseException {
        // Must be the same analyzer configuration that produced the index.
        Analyzer analyzer = buildAnalyzer();

        // try-with-resources: reader and directory were previously never closed.
        try (Directory directory = FSDirectory.open(Paths.get(INDEX_DIR));
             DirectoryReader directoryReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = new IndexSearcher(directoryReader);

            // Search both fields with a single query string.
            String keyword = "坚持";
            Query query = new MultiFieldQueryParser(
                    new String[]{"title", "content"}, analyzer).parse(keyword);

            TopDocs topDocs = indexSearcher.search(query, 5);
            long totalHits = topDocs.totalHits;
            System.out.println("共查到" + totalHits + "条");
            float maxScore = topDocs.getMaxScore();
            System.out.println("最高得分为:" + maxScore);

            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                Document doc = indexSearcher.doc(scoreDoc.doc);
                System.out.println("title:" + doc.get("title"));
                System.out.println("content:" + doc.get("content"));
                System.out.println("单句得分" + scoreDoc.score);
            }
        }
    }
}
