package com.ckex.learn.lucene;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Demo: index one Chinese sentence into an in-memory Lucene 4.4 index using
 * IKAnalyzer (smart segmentation mode), then query it and print the hits.
 *
 * @author ckex created 2013-8-26 - 下午2:36:43 IKAnalyzerDemo.java
 * @explain -
 */
public class IKAnalyzerDemo {
    private static final String CHARSET = "UTF-8";

    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();

        // Field type that is indexed, stored AND tokenized, so the original
        // text can be searched and also retrieved back from a hit document.
        FieldType fieldType = new FieldType();
        fieldType.setIndexed(true);   // index this field
        fieldType.setStored(true);    // store the original value
        fieldType.setTokenized(true); // run the analyzer over the value
        fieldType.freeze();           // prevent accidental later mutation

        String search = "智能";

        String test = "为true 时，智能切分，false 或无参时最细粒度切分";

        String field = "contents";

        // true -> smart (coarse) segmentation; false or no-arg -> finest-grained segmentation
        Analyzer analyzer = new IKAnalyzer(Boolean.TRUE);

        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_44, analyzer);
        iwc.setOpenMode(OpenMode.CREATE);
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, iwc);
        Document doc = new Document();
        doc.add(new LongField("id", System.currentTimeMillis() / 10000, Field.Store.YES));
        // BUGFIX: the original used new TextField(field, reader) — the Reader
        // variant is never stored, so document.get(field) below printed null.
        // Passing the String with the stored FieldType fixes retrieval.
        doc.add(new Field(field, test, fieldType));
        writer.addDocument(doc);
        //        writer.updateDocument(new Term("path", file.getPath()), doc);
        writer.close();
        System.out.println("完成索引: " + (System.currentTimeMillis() - start));

        //********************************

        IndexReader indexReader = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(indexReader);
        QueryParser parser = new QueryParser(Version.LUCENE_44, field, analyzer);
        Query query = parser.parse(search);
        TopDocs results = searcher.search(query, 10);
        ScoreDoc[] hits = results.scoreDocs;

        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        for (ScoreDoc scoreDoc : hits) {
            System.out.println(scoreDoc.doc);
            System.out.println(scoreDoc.score);
            Document document = searcher.doc(scoreDoc.doc);
            System.out.println(document.get("id") + "  <><><><>  " + document.get(field));
        }

        // Release the reader and the in-memory directory (previously leaked).
        indexReader.close();
        dir.close();
    }

}
