/**
 * 
 * Demo.java
 * Defender
 * 2013-11-23
 */
package com.ls.fw.index.test;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.ls.fw.data.search.impl.dao.SearchDao;
import com.ls.fw.data.search.impl.lucene.bean.SearchResult;

/**
 * Demo of Lucene 4.x indexing and searching with the IKAnalyzer Chinese
 * analyzer: query-type examples, an in-memory index round trip, token-stream
 * inspection, and a (stubbed) SearchDao add/search/update cycle.
 * 
 * @author Defender 2013-11-23 下午08:19:18
 */
public class Demo {

	// Filesystem location for an on-disk index; unused while the SearchDao
	// implementation below is commented out.
	static String pathFile = "E://java//lucene/index";

	/**
	 * Builds several Lucene query types as examples, then — when a SearchDao
	 * implementation is configured — runs an add / search / update / search
	 * cycle against it.
	 *
	 * @param args unused
	 * @throws Exception propagated from the SearchDao operations
	 */
	public static void main(String[] args) throws Exception {
		// Exact term in the "name" field.
		Term term = new Term("name", "zhang");
		// Fuzzy (edit-distance based) matching.
		FuzzyQuery fuzzyQuery = new FuzzyQuery(term);

		// Prefix matching: terms starting with "zh".
		Term prefixterm = new Term("name", "zh");
		PrefixQuery prefixQuery = new PrefixQuery(prefixterm);

		// Range search over "datetime". The two booleans say whether the
		// lower/upper bounds are included in the results: here the lower
		// bound "191111" is excluded and the upper bound "198108" included.
		Term begin = new Term("datetime", "191111");
		Term end = new Term("datetime", "198108");
		TermRangeQuery termRangeQuery = new TermRangeQuery("datetime",
				begin.bytes(), end.bytes(), false, true);

		// Wildcard search: '?' matches one character, '*' any sequence.
		Term wildcardterm = new Term("name", "zh???g*");
		WildcardQuery wildcardQuery = new WildcardQuery(wildcardterm);

		// Combine several queries with a BooleanQuery; both clauses MUST match.
		Term term1 = new Term("name", "zh*");
		WildcardQuery wildcardQuery1 = new WildcardQuery(term1);
		Term term2 = new Term("datetime", "191111");
		TermQuery termQuery = new TermQuery(term2);
		BooleanQuery booleanQuery = new BooleanQuery();
		booleanQuery.add(wildcardQuery1, Occur.MUST);
		booleanQuery.add(termQuery, Occur.MUST);

		// IK analyzer in smart mode (Chinese word segmentation).
		Analyzer analyzer = new IKAnalyzer(true);
		SearchDao dao = null; // new SearchDaoImpl(pathFile, analyzer);

//		dao.setAutoCommit(false);

		// BUG FIX: the original dereferenced dao unconditionally and always
		// threw a NullPointerException because the implementation above is
		// commented out. Bail out cleanly instead.
		if (dao == null) {
			System.out.println("No SearchDao implementation configured; skipping the DAO demo.");
			return;
		}

		add(dao);
		dao.commit();

		search(dao);

		update(dao);
		dao.commit();
//		dao.reOpenIndexReader();
		search(dao);
		dao.close();
	}

	/**
	 * Indexes a single document into an in-memory (RAM) directory and
	 * searches it with a QueryParser over the IK analyzer, printing the hits.
	 */
	public static void test() {
		// Lucene Document field name to index and search.
		String fieldName = "text";
		// Sample content to index.
		String text = "IK Analyzer是一个结合词典分词和文法分词的中文分词开源工具包。它使用了全新的正向迭代最细粒度切分算法。";

		// IK analyzer in smart mode.
		Analyzer analyzer = new IKAnalyzer(true);

		Directory directory = null;
		IndexWriter iwriter = null;
		IndexReader ireader = null;
		IndexSearcher isearcher = null;
		try {
			// In-memory index.
			directory = new RAMDirectory();

			// Configure the IndexWriter.
			IndexWriterConfig iwConfig = new IndexWriterConfig(
					Version.LUCENE_45, analyzer);
			iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
			iwriter = new IndexWriter(directory, iwConfig);
			// Write one document; closing the writer commits it so the
			// reader below can see it.
			Document doc = new Document();
			doc.add(new StringField("ID", "10000", Field.Store.YES));
			doc.add(new TextField(fieldName, text, Field.Store.YES));
			iwriter.addDocument(doc);
			iwriter.close();

			// Search phase: open a reader/searcher over the directory.
			ireader = DirectoryReader.open(directory);
			isearcher = new IndexSearcher(ireader);

			String keyword = "中文分词工具包";
			// BUG FIX: use the same Version as the writer (was LUCENE_40,
			// which could select different analysis behavior).
			QueryParser qp = new QueryParser(Version.LUCENE_45, fieldName,
					analyzer);
			qp.setDefaultOperator(QueryParser.AND_OPERATOR);
			Query query = qp.parse(keyword);
			System.out.println("Query = " + query);

			// Fetch at most the 5 best-scoring hits.
			TopDocs topDocs = isearcher.search(query, 5);
			System.out.println("命中：" + topDocs.totalHits);
			ScoreDoc[] scoreDocs = topDocs.scoreDocs;
			// BUG FIX: iterate over scoreDocs.length, not totalHits —
			// totalHits counts ALL matches and can exceed the 5 documents
			// actually returned, causing ArrayIndexOutOfBoundsException.
			for (int i = 0; i < scoreDocs.length; i++) {
				Document targetDoc = isearcher.doc(scoreDocs[i].doc);
				System.out.println("内容：" + targetDoc.toString());
			}

		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		} catch (ParseException e) {
			e.printStackTrace();
		} finally {
			// BUG FIX: also close the writer here so it is released when an
			// exception fires before the in-try close (closing an already
			// closed IndexWriter is a no-op).
			if (iwriter != null) {
				try {
					iwriter.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
			if (ireader != null) {
				try {
					ireader.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
			if (directory != null) {
				try {
					directory.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Tokenizes a mixed Chinese/English sample string with the IK analyzer
	 * (smart mode) and prints each token's offsets, text and type.
	 */
	public static void token() {
		// Build the IK tokenizer in smart segmentation mode.
		Analyzer analyzer = new IKAnalyzer(true);

		// Obtain Lucene's TokenStream for the sample text.
		TokenStream ts = null;
		try {
			ts = analyzer
					.tokenStream(
							"myfield",
							new StringReader(
									"这是一个中文分词的例子，你可以直接运行它！IKAnalyer can analysis english text too"));
			// Start/end character offsets of each token.
			OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
			// Token text.
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			// Token type attribute.
			TypeAttribute type = ts.addAttribute(TypeAttribute.class);

			// reset() is required before the first incrementToken().
			ts.reset();
			// Iterate over the segmentation results.
			while (ts.incrementToken()) {
				System.out.println(offset.startOffset() + " - "
						+ offset.endOffset() + " : " + term.toString() + " | "
						+ type.type());
			}
			// Perform end-of-stream operations, e.g. set the final offset.
			ts.end();

		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Release all TokenStream resources (also closes the reader).
			if (ts != null) {
				try {
					ts.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Builds two sample documents (id / content / city fields). The actual
	 * hand-off to the DAO is stubbed out until SearchDao.addDocs is wired up.
	 *
	 * @param dao target DAO (currently unused — the addDocs call is stubbed)
	 * @throws Exception propagated from the DAO once enabled
	 */
	public static void add(SearchDao dao) throws Exception {
		String[] ids = { "1", "2" };
		String[] content = { "Amsterdam has lost of add  cancals",
				"i love  add this girl" };
		String[] city = { "Amsterdam", "Venice" };

		List<Document> docs = new ArrayList<Document>();
		for (int i = 0; i < ids.length; i++) {
			Document doc = new Document();
			// StringField: indexed but not tokenized — the entire String
			// value is indexed as a single token.
			doc.add(new StringField("id", ids[i], Store.YES));

			// TextField: indexed and tokenized, without term vectors.
			doc.add(new TextField("content", content[i], Store.YES));

			doc.add(new StringField("city", city[i], Store.YES));

			docs.add(doc);
		}

//		dao.addDocs(docs);
	}

	/**
	 * Builds a replacement document for id "1". The actual update is stubbed
	 * out until SearchDao.updateDoc is wired up.
	 *
	 * @param dao target DAO (currently unused — the updateDoc call is stubbed)
	 * @throws Exception propagated from the DAO once enabled
	 */
	public static void update(SearchDao dao) throws Exception {
		String[] ids = { "1" };
		String[] content = { "1111Amsterdam has lost of add  cancals" };
		String[] city = { "1111Amsterdam11" };
		Document doc = new Document();
		for (int i = 0; i < ids.length; i++) {
			// StringField: indexed but not tokenized — the entire String
			// value is indexed as a single token.
			doc.add(new StringField("id", ids[i], Store.YES));

			// TextField: indexed and tokenized, without term vectors.
			doc.add(new TextField("content", content[i], Store.YES));

			doc.add(new StringField("city", city[i], Store.YES));
		}
		Term term = new Term("id", "1");
//		dao.updateDoc(term, doc);
	}

	/**
	 * Searches the "content" field for the term "add" via the DAO and prints
	 * every hit's stored fields and score.
	 *
	 * @param dao DAO whose search method is currently stubbed out
	 * @throws Exception propagated from the DAO once enabled
	 */
	public static void search(SearchDao dao) throws Exception {
		Term term = new Term("content", "add");
		Query query = new TermQuery(term);
		SearchResult result = null; // dao.search(query, 10);
		// BUG FIX: the original dereferenced result unconditionally and
		// always threw a NullPointerException because the search call above
		// is commented out. Bail out cleanly instead.
		if (result == null) {
			System.out.println("SearchDao.search is not wired up; nothing to display.");
			return;
		}
		TopDocs topdocs = result.getTopdocs();
		ScoreDoc[] scoreDocs = topdocs.scoreDocs;
		System.out.println("查询结果总数---" + topdocs.totalHits + "最大的评分--"
				+ topdocs.getMaxScore());
		for (int i = 0; i < scoreDocs.length; i++) {
			int doc = scoreDocs[i].doc;
			Document document = result.getSearcher().doc(doc);
			System.out.println("id:" + document.get("id") + "---scors--"
					+ scoreDocs[i].score + "---index--"
					+ scoreDocs[i].shardIndex);
			System.out.println("content:" + document.get("content"));
			System.out.println("city:" + document.get("city"));

		}
		// result.close();
	}

}
