package com.matrix.nickel.lucene;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.*;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;

/**
 * Exploratory tests for basic Lucene usage: building an index from a directory
 * of files, running a {@link TermQuery} against it, and inspecting the token
 * output of an {@link Analyzer}.
 *
 * <p>These tests depend on local, hard-coded Windows paths (see the constants
 * below) and are intended for manual experimentation, not CI.
 */
public class FirstLucene {

    /** Filesystem location of the Lucene index. */
    private static final String INDEX_DIR = "G:\\intellijIdea2018\\lucene_solr\\.index";

    /** Directory containing the source documents to be indexed. */
    private static final String DOC_DIR = "G:\\intellijIdea2018\\lucene_solr\\documents";

    /**
     * Builds an index over every regular file directly under {@link #DOC_DIR}.
     * For each file the name, size and path are stored; the content is indexed
     * for search but not stored (to keep the index small).
     *
     * @throws IOException if the index cannot be opened or a file cannot be read
     */
    @Test
    public void testCreateIndex() throws IOException {
        // 1. Create the IndexWriter:
        // 1.1 point it at the index directory
        // 1.2 give it the analyzer used to tokenize document content
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        Analyzer analyzer = new StandardAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        // try-with-resources guarantees the writer (and the index write lock)
        // is released even if reading one of the files throws.
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            // 2./3. For each file, build a Document from Field objects.
            File[] files = new File(DOC_DIR).listFiles();
            if (files == null) {
                // DOC_DIR does not exist or is not a directory — nothing to index.
                return;
            }
            for (File f : files) {
                if (f.isDirectory()) {
                    continue; // only index regular files, skip subdirectories
                }
                Document document = new Document();

                // Name: tokenized AND stored, so it is both searchable and displayable.
                document.add(new TextField("fileName", f.getName(), Store.YES));

                // Size: stored only (retrievable from hits, not searchable as text).
                document.add(new StoredField("fileSize", FileUtils.sizeOf(f)));

                // BUG FIX: the original called file.getPath() (the parent
                // directory), storing the same path for every document.
                // Store the current file's own path instead.
                document.add(new StoredField("filePath", f.getPath()));

                // Content: indexed for full-text search, not stored.
                document.add(new TextField("fileContent",
                        FileUtils.readFileToString(f, "UTF-8"), Store.NO));

                indexWriter.addDocument(document);
            }
        }
    }

    /**
     * Searches the index for documents whose {@code fileName} field contains
     * the exact token {@code "spring"} and prints the stored fields of the
     * top 5 hits.
     *
     * @throws IOException if the index cannot be opened or read
     */
    @Test
    public void testSearchIndex() throws IOException {
        // 1. Open the index directory built by testCreateIndex().
        Directory directory = FSDirectory.open(new File(INDEX_DIR).toPath());
        // try-with-resources closes the reader even if the search throws.
        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);
            // TermQuery matches the untokenized term exactly in the given field.
            Query query = new TermQuery(new Term("fileName", "spring"));
            TopDocs topDocs = indexSearcher.search(query, 5);
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                Document document = indexSearcher.doc(scoreDoc.doc);
                System.out.println(document.get("fileName"));
                System.out.println(document.get("fileSize"));
                System.out.println(document.get("filePath"));
                // NOTE: "fileContent" was indexed with Store.NO, so it is not
                // retrievable here — document.get("fileContent") returns null.
            }
        }
    }

    /**
     * Prints the tokens that {@link CJKAnalyzer} produces for a Chinese
     * sentence, one bracketed token per {@code incrementToken()} call.
     *
     * @throws IOException if tokenization fails
     */
    @Test
    public void testTokenStream() throws IOException {
        // Other analyzers tried during experimentation:
        // Analyzer analyzer = new WhitespaceAnalyzer();
        // Analyzer analyzer = new StandardAnalyzer();
        CJKAnalyzer analyzer = new CJKAnalyzer();

        String inputText = "我们都是一个中国人，我们都不会说英语";
        // try-with-resources closes the TokenStream (the original leaked it).
        try (TokenStream tokenStream =
                     analyzer.tokenStream("text", new StringReader(inputText))) {
            // Attribute view that exposes the current token's text.
            CharTermAttribute charTermAttribute =
                    tokenStream.addAttribute(CharTermAttribute.class);
            // The stream must be reset to a clean state before the first
            // incrementToken() call (Lucene TokenStream contract).
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                // Print each token produced by the analyzer.
                System.out.print("[" + charTermAttribute + "]");
            }
            // Signal end-of-input so end-of-stream attributes are set correctly,
            // per the TokenStream workflow: reset -> incrementToken* -> end -> close.
            tokenStream.end();
        }
    }
}
