import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.List;

/**
 * Created by Administrator on 2016/8/20.
 */
/**
 * Lucene smoke-test utility: builds a tiny index, searches it with a
 * QueryParser, and dumps low-level index internals (stored documents,
 * per-term document frequencies, and postings lists).
 *
 * Created by Administrator on 2016/8/20.
 */
public class Test {
    /** The single indexed/stored field name used by every method here. */
    private static final String FIELD = "info";

    /**
     * Builds a three-document index in the given directory.
     *
     * @param dir directory the index is written into
     * @throws IOException if the index cannot be written
     */
    public static void buildIndex(Directory dir) throws IOException {
        Analyzer a = new StandardAnalyzer();
        // try-with-resources guarantees the writer is closed (committing the
        // index) even if addDocument throws.
        try (IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(a))) {
            for (int i = 1; i <= 3; i++) {
                Document doc = new Document();
                doc.add(new TextField(FIELD, "this is my first lucene test" + i, Field.Store.YES));
                iw.addDocument(doc);
            }
        }
    }

    /**
     * Searches the index for the term "lucene" and prints score, doc id and
     * the stored field value of every hit.
     *
     * @param dir directory containing an index built by {@link #buildIndex}
     * @throws IOException    if the index cannot be read
     * @throws ParseException if the query string cannot be parsed
     */
    public static void searchIndexUsingQueryParser(Directory dir) throws IOException, ParseException {
        Analyzer a = new StandardAnalyzer();
        // try-with-resources replaces the manual reader.close() so the reader
        // is released even when search/parse throws.
        try (IndexReader reader = DirectoryReader.open(dir)) {
            IndexSearcher is = new IndexSearcher(reader);
            QueryParser parser = new QueryParser(FIELD, a);
            Query query = parser.parse("lucene");
            TopDocs topDocs = is.search(query, 1000);
            System.out.println("总共匹配多少个：" + topDocs.totalHits);
            ScoreDoc[] hits = topDocs.scoreDocs;
            // should equal topDocs.totalHits (all hits fit within the 1000 cap)
            System.out.println("多少条数据：" + hits.length);
            for (ScoreDoc scoreDoc : hits) {
                System.out.println("匹配得分：" + scoreDoc.score);
                System.out.println("文档索引ID：" + scoreDoc.doc);
                Document document = is.doc(scoreDoc.doc);
                System.out.println(document.get(FIELD));
            }
        }
    }

    /**
     * Dumps index internals: every stored document and term vector, the doc
     * frequency of two sample terms, and the full term/postings enumeration
     * of the {@value #FIELD} field in every leaf segment.
     *
     * @param dir directory containing an index built by {@link #buildIndex}
     * @throws IOException if the index cannot be read
     */
    public static void testReader(Directory dir) throws IOException {
        // try-with-resources: the original leaked the reader on every exit path.
        try (IndexReader reader = DirectoryReader.open(dir)) {
            System.out.println("all the documents---------------------------------");
            for (int i = 0; i < reader.maxDoc(); i++) {
                System.out.println(i + " doc:     " + reader.document(i));
                System.out.println(i + " termVec:     " + reader.getTermVectors(i));
            }

            // BUG FIX: the original used new Term(termValue), whose single
            // String argument is the FIELD name (with empty term text), so
            // docFreq() always looked up the wrong field and printed 0.
            // The term must be qualified with the field it was indexed under.
            printDocFreq(reader, "abc");
            printDocFreq(reader, "this");

            // Walk every term of the field in every leaf segment and print its
            // text, doc frequency, and the ids of the documents containing it.
            System.out.println("terms----------------------------------");
            for (LeafReaderContext lrc : reader.leaves()) {
                System.out.println("leaf reader begin----------------------------------");
                LeafReader lrd = lrc.reader();
                Terms terms = lrd.terms(FIELD);
                if (terms == null) {
                    continue; // this segment has no postings for the field
                }
                TermsEnum termsEnum = terms.iterator(null); // enumerate all terms
                BytesRef bytesref;
                while ((bytesref = termsEnum.next()) != null) {
                    // term text and document frequency
                    System.out.println("term:" + bytesref.utf8ToString() + "    :docFreq=" + termsEnum.docFreq());
                    // null live-docs filter => all (non-deleted) docs are enumerated
                    PostingsEnum docsEnum = termsEnum.postings(null, null, PostingsEnum.NONE);
                    int i;
                    while ((i = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                        System.out.println("docid=" + i);
                    }
                }
            }

            // 测试docvalues
            //this.codereader.getDocValuesReader().getSorted(fi);
        }
    }

    /**
     * Prints how many documents contain {@code termValue} in {@link #FIELD}.
     * NOTE(review): with StandardAnalyzer, stop words like "this" are removed
     * at index time, so their reported frequency is expected to be 0.
     */
    private static void printDocFreq(IndexReader reader, String termValue) throws IOException {
        System.out.println("info for term " + termValue + ":----------------------------------");
        int docfreq = reader.docFreq(new Term(FIELD, termValue));
        System.out.println("docfreq for \"" + termValue + "\"=" + docfreq);
    }

    /**
     * Entry point: opens the on-disk index and runs the reader dump.
     * Uncomment buildIndex/search calls to (re)build or query the index.
     */
    public static void main(String[] args) throws IOException, ParseException {
        Directory dir = FSDirectory.open(Paths.get("C:\\work\\tmp\\index1"));
        //buildIndex(dir);
        //searchIndexUsingQueryParser(dir);
        testReader(dir);
    }
}
