/**
 * IK 中文分词 版本 5.0 IK Analyzer release 5.0
 * 
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
 * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
 * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * 
 * http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 * 
 * 源代码由林良益(linliangyi2005@gmail.com)提供 版权声明 2012，乌龙茶工作室 provided by Linliangyi and copyright 2012 by Oolong studio
 * 
 * 
 */
package org.wltea.analyzer.sample;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * 使用IKAnalyzer进行Lucene索引和查询的演示 2012-3-2
 * 
 * 以下是结合Lucene4.0 API的写法
 * 
 */
public class LuceneIndexAndSearchDemo {

	/**
	 * Demo: builds an index containing a single two-field document with
	 * IKAnalyzer, runs a multi-field boosted boolean query against it, and
	 * prints each hit with keyword highlighting.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		// Prefix for the Lucene document field names ("text1", "text2")
		String fieldName = "text";
		// Content to index
		String text2 = "123";
		String text1 = "IK Analyzer是一个结合词典分词和文法分词的中文分词开源工具包。它使用了全新的正向迭代最细粒度切分算法。";
		// IKAnalyzer in smart-segmentation mode; used for BOTH indexing and
		// highlighting so that token offsets line up with the indexed text.
		Analyzer analyzer = new IKAnalyzer(true);
		Directory directory = null;
		IndexWriter iwriter = null;
		IndexReader ireader = null;
		IndexSearcher isearcher = null;
		try {
			// Open the on-disk index directory
			File file = new File("e:/lucene3");
			directory = FSDirectory.open(file);

			// Configure and open the IndexWriter
			IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_47, analyzer);
			iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
			iwriter = new IndexWriter(directory, iwConfig);
			// Index one document with two stored text fields
			Document doc = new Document();
			doc.add(new TextField(fieldName + "1", text1, Field.Store.YES));
			doc.add(new TextField(fieldName + "2", text2, Field.Store.YES));
			iwriter.addDocument(doc);
			iwriter.close();
			iwriter = null; // closed successfully; avoid double-close in finally

			// Search phase **********************************
			// Open a reader/searcher over the index just written
			ireader = DirectoryReader.open(directory);
			isearcher = new IndexSearcher(ireader);

			String keyword = "中文分词工具包";
			// Build per-field queries via the query parser
			String[] queryFields = { fieldName + "1", fieldName + "2" };
			QueryParser qp = new MultiFieldQueryParser(Version.LUCENE_47, queryFields, analyzer);
			qp.setDefaultOperator(QueryParser.OR_OPERATOR);

			// Field "text1" is weighted far above "text2"
			Query query1 = qp.createBooleanQuery(fieldName + "1", keyword);
			query1.setBoost(20f);
			Query query2 = qp.createBooleanQuery(fieldName + "2", keyword);
			query2.setBoost(1f);

			BooleanQuery query = new BooleanQuery();
			query.add(query1, Occur.SHOULD);
			query.add(query2, Occur.SHOULD);

			// Retrieve the top 50 hits by relevance
			TopDocs topDocs = isearcher.search(query, 50);
			System.out.println("Query = " + query);
			System.out.println("命中：" + topDocs.totalHits);

			// Highlighter setup is loop-invariant; build it once, not per hit
			SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
			Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));
			highlighter.setTextFragmenter(new SimpleFragmenter(400));

			// Print each hit: all stored fields, doc id, highlighted content, score
			ScoreDoc[] scoreDocs = topDocs.scoreDocs;
			for (int i = 0; i < scoreDocs.length; i++) {
				System.out.println("====================文件【" + (i + 1) + "】=================");
				Document targetDoc = isearcher.doc(scoreDocs[i].doc);
				List<IndexableField> fields = targetDoc.getFields();
				for (IndexableField field : fields) {
					System.out.println(field.name());
					System.out.println(field.stringValue());
				}
				System.out.println("检索关键词：" + keyword);
				System.out.println("文件ID:" + scoreDocs[i].doc);
				String content = targetDoc.get(fieldName + "1");
				/* Begin: keyword highlighting */
				if (content != null) {
					// BUGFIX: Analyzer.tokenStream() takes the FIELD name as its
					// first argument, not the search keyword. Also reuse the
					// indexing analyzer instead of a throwaway CJKAnalyzer so
					// highlight token offsets match the indexed terms.
					TokenStream tokenStream = analyzer.tokenStream(fieldName + "1", new StringReader(content));
					try {
						content = highlighter.getBestFragment(tokenStream, content);
					} catch (Exception e) {
						e.printStackTrace();
					} finally {
						tokenStream.close(); // TokenStream is Closeable; release it per hit
					}
				}
				/* End: keyword highlighting */
				System.out.println("文件内容:" + content);
				System.out.println("匹配相关度：" + scoreDocs[i].score);
			}

		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			// Close resources in reverse order of acquisition, each guarded
			// independently so one failure does not skip the others.
			if (iwriter != null) {
				try {
					iwriter.close(); // only non-null if indexing failed mid-way
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
			if (ireader != null) {
				try {
					ireader.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
			if (directory != null) {
				try {
					directory.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}
}
