package net.lucene.chapter10.index;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.apache.tika.Tika;
import org.apache.tika.metadata.Metadata;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;

/**
 * [Chapter 10] Tika: extracting text from files with Apache Tika and
 * indexing/searching it with Lucene 3.6.
 */
public class HelloTikaIndex {

    // Directory where the Lucene index is stored
    private static final String PATH_OF_INDEX = "chapter10-demo/src/main/resources/lucene/10_index";
    // Directory containing the files to be indexed
    private static final String PATH_OF_FILE = "chapter10-demo/src/main/resources/lucene/10_file";

    /**
     * Builds a Lucene index over every {@code *.txt} file found (recursively)
     * under {@link #PATH_OF_FILE}, using Tika to extract the text content.
     * Each document gets an analyzed, unstored "content" field plus stored,
     * non-analyzed "fileName" and "filePath" fields.
     */
    public void createIndex() {

        Directory directory;
        IndexWriter writer = null;
        Document doc;

        try {

            directory = FSDirectory.open(new File(PATH_OF_INDEX));
            writer = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_36,
                    new StandardAnalyzer(Version.LUCENE_36)));

            // Reuse one Tika facade for the whole loop: constructing a Tika
            // instance loads detector/parser configuration and is loop-invariant.
            Tika tika = new Tika();

            for (File file : FileUtils.listFiles(new File(PATH_OF_FILE), new String[]{"txt"}, true)) {
                doc = new Document();

                // NOTE: if you switch back to the Metadata-capturing overload
                // below, directories must be filtered out first, otherwise Tika
                // fails when it tries to open a directory as a stream.
//                if (file.isDirectory()) {
//                    continue;
//                }
//                Metadata metadata = new Metadata();
//                doc.add(new Field("content", new Tika().parse(new FileInputStream(file), metadata)));

                // Field(String, Reader) is tokenized but not stored; Lucene
                // consumes and closes the Reader during addDocument().
                doc.add(new Field("content", tika.parse(file)));
                doc.add(new Field("fileName", file.getName(), Field.Store.YES, Field.Index.NOT_ANALYZED));
                doc.add(new Field("filePath", file.getAbsolutePath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
                // Add the document to the index via the IndexWriter
                writer.addDocument(doc);
            }
        } catch (IOException | NullPointerException e) {
            System.out.println("创建索引的过程中遇到异常，堆栈轨迹如下：");
            e.printStackTrace();
        } finally {
            if (null != writer) {
                try {
                    writer.close(); // An IndexWriter must always be closed after use
                } catch (IOException e) {
                    System.out.println("关闭IndexWriter时遇到异常，堆栈轨迹如下：");
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Runs a {@link TermQuery} against the index and prints the stored
     * "fileName" and "filePath" of each hit (up to 50).
     *
     * @param fileName used as the Lucene <em>field name</em> of the term —
     *                 NOTE(review): despite its name, this parameter selects
     *                 the field (e.g. "fileName" or "content"), not a file;
     *                 confirm callers pass a field name here.
     * @param keyWords the exact (non-analyzed) term text to match
     */
    public void searchFile(String fileName, String keyWords) {

        IndexReader reader = null;
        try {
            reader = IndexReader.open(FSDirectory.open(new File(PATH_OF_INDEX)));
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = new TermQuery(new Term(fileName, keyWords));
            // The second argument caps the number of hits returned: at most
            // 50 ScoreDocs come back even if more documents match.
            TopDocs tds = searcher.search(query, 50);
            // TopDocs does not hold the documents themselves, only ScoreDoc handles
            ScoreDoc[] sds = tds.scoreDocs;

            // Each ScoreDoc carries the internal document id, which lets us
            // fetch the stored fields of every matching document.
            for (ScoreDoc sd : sds) {
                // sd.doc is the internal Lucene document number
                Document doc = searcher.doc(sd.doc);
                // Print the stored fields of this document
                System.out.println(doc.get("fileName") + " [" + doc.get("filePath") + "]");
            }
        } catch (Exception e) {
            System.out.println("搜索文件的过程中遇到异常,堆栈轨迹如下：");
            e.printStackTrace();
        } finally {
            if (null != reader) {
                try {
                    reader.close();
                } catch (IOException e) {
                    System.out.println("关闭IndexReader时遇到异常,堆栈轨迹如下：");
                    e.printStackTrace();
                }
            }
        }
    }
}