package cn.itcast;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.junit.Test;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.File;

/**
 * Lucene demo: builds a file-based index, queries it by term, and exercises
 * the IK analyzer. All paths are hard-coded Windows paths; each method is an
 * independent, manually-run test.
 */
public class LuceneInnit {

    /** Index every file in the sample directory into the index at E:\work_class2\index. */
    @Test
    public void initIndex() throws Exception {
        // Analyzer is abstract; IKAnalyzer provides Chinese word segmentation.
        Analyzer analyzer = new IKAnalyzer();
        // IndexWriterConfig needs the Lucene version and the analyzer.
        IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
        // try-with-resources guarantees the directory and writer are closed
        // even if reading one of the source files throws.
        try (FSDirectory directory = FSDirectory.open(new File("E:\\work_class2\\index"));
             IndexWriter indexWriter = new IndexWriter(directory, config)) {
            // Uncomment to wipe any previously created index first.
            //indexWriter.deleteAll();
            File filePath = new File("E:\\work_class2\\项目2知识点\\190727\\day02_lucene\\资料\\上课用的查询资料searchsource");
            File[] files = filePath.listFiles();
            if (files == null) {
                // listFiles() returns null when the path is missing or not a directory;
                // fail loudly instead of NPE-ing inside the loop.
                throw new IllegalStateException("Not a readable directory: " + filePath);
            }
            for (File file : files) {
                // TextField   -> tokenized; use for fields that should be analyzed.
                // StringField -> not tokenized; use for atomic values (phone numbers, ID numbers, ...).
                Document document = new Document();
                // Field.Store.YES: keep the original value so it can be displayed in results.
                document.add(new TextField("fileName", file.getName(), Field.Store.YES));
                document.add(new StringField("filePath", file.getPath(), Field.Store.YES));
                // Content is searchable but not stored (Field.Store.NO), so it cannot
                // be read back from search results.
                String content = FileUtils.readFileToString(file, "utf-8");
                document.add(new TextField("fileContent", content, Field.Store.NO));
                // File size; primitive long avoids needless boxing.
                long size = FileUtils.sizeOf(file);
                document.add(new LongField("fileSize", size, Field.Store.YES));
                // One document per file, four fields per document.
                indexWriter.addDocument(document);
            }
        }
    }

    /** Query the index by term and print the stored fields of each hit. */
    @Test
    public void query() throws Exception {
        // Open the existing index; reader and directory are closed automatically.
        try (Directory directory = FSDirectory.open(new File("E:\\work_class2\\index"));
             IndexReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);

            // Exact-term query; other Query subclasses fit other needs.
            TermQuery query = new TermQuery(new Term("fileName", "简介"));
            // Fetch at most 100 hits.
            TopDocs topDocs = indexSearcher.search(query, 100);
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                // scoreDoc.doc is the internal Lucene document id.
                int docID = scoreDoc.doc;
                Document doc = indexSearcher.doc(docID);
                System.out.println("fileName:" + doc.get("fileName"));
                System.out.println("fileSize:" + doc.get("fileSize"));
                System.out.println("filePath:" + doc.get("filePath"));
                // fileContent was indexed with Store.NO, so doc.get("fileContent") would be null.
                System.out.println("============================================================");
            }
            // Total number of matching documents (may exceed the 100 returned).
            int totalHits = topDocs.totalHits;
            System.out.println("一共" + totalHits);
        }
    }

    /** Print the tokens the analyzer produces for a sample Chinese string. */
    @Test
    public void testAnalyzer() throws Exception {
        //  String text = "The Spring Framework provides a comprehensive programming and configuration model.";
        String text = "李大庆，蓝瘦香菇传智播客美特斯邦威";
        // Alternatives: StandardAnalyzer, CJKAnalyzer, SmartChineseAnalyzer.
        Analyzer analyzer = new IKAnalyzer();

        // TokenStream contract: reset() before the first incrementToken(),
        // end() after the last one, then close() (done by try-with-resources).
        try (TokenStream tokenStream = analyzer.tokenStream("test", text)) {
            // Attribute handle; its content is updated in place by incrementToken().
            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                System.out.println(charTermAttribute);
            }
            tokenStream.end();
        }
    }

    /** Add a single document with a boosted fileName field to the existing index. */
    @Test
    public void addOneIndex() throws Exception {
        Analyzer analyzer = new IKAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
        try (FSDirectory directory = FSDirectory.open(new File("E:\\work_class2\\index"));
             IndexWriter indexWriter = new IndexWriter(directory, config)) {
            // TextField   -> tokenized
            // StringField -> not tokenized
            Document document = new Document();
            TextField textField = new TextField("fileName", "暗示法好呢", Field.Store.YES);
            // Raise this field's scoring weight above the default of 1.
            textField.setBoost(2);
            document.add(textField);
            indexWriter.addDocument(document);
        }
    }
}
