package com.dhf.xhb.util;

import java.io.File;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Lucene工具类，goods_tb专用；包含创建索引、搜索索引、关闭资源方法
 * 
 * @author DHF
 *
 */
public class LuceneUtil {

	// Index directory shared by both search methods (was duplicated as a literal).
	private static final String INDEX_PATH = "d:/xhb/index";

	// Fields targeted by keyword queries.
	private static final String[] SEARCH_FIELDS = {"goodsName", "goodsType"};

	// Maximum number of hits fetched per query (was an inline magic number).
	private static final int MAX_HITS = 10000;

	/**
	 * Opens an {@link IndexWriter} for the given index path and analyzer, and
	 * clears any previously indexed documents so the index can be rebuilt.
	 *
	 * @param indexPath directory path of the Lucene index
	 * @param analyzer  analyzer used to tokenize indexed fields
	 * @return the opened writer, or {@code null} if initialization failed
	 */
	public IndexWriter createIndexWriter(String indexPath, Analyzer analyzer) {
		IndexWriter indexWriter = null;
		try {
			FSDirectory indexFile = FSDirectory.open(new File(indexPath));
			indexWriter = new IndexWriter(indexFile, new IndexWriterConfig(Version.LUCENE_4_9, analyzer));
			// Rebuild from scratch: drop every previously indexed document.
			indexWriter.deleteAll();
		} catch (Exception e) {
			e.printStackTrace();
			System.out.println("indexWirter 初始化失败");
			// If the writer opened but deleteAll() failed, close it so the
			// index write-lock is not leaked, and report failure via null.
			if (indexWriter != null) {
				try {
					indexWriter.close();
				} catch (Exception ignored) {
					// best effort — the original failure was already reported
				}
				indexWriter = null;
			}
		}
		return indexWriter;
	}

	/**
	 * Searches the goods index for the given keyword over the goodsName and
	 * goodsType fields and collects the matching goodsId values.
	 *
	 * @param keyWord keyword to tokenize and search for
	 * @return sorted set of matching goodsId values, or {@code null} when the
	 *         search failed (contract preserved from the original version)
	 */
	public Set<String> searchIndex(String keyWord) {
		Set<String> idSet = null;
		// try-with-resources: the directory and reader were previously leaked.
		try (FSDirectory dir = FSDirectory.open(new File(INDEX_PATH));
				DirectoryReader reader = DirectoryReader.open(dir)) {
			IndexSearcher searcher = new IndexSearcher(reader);
			// Fine-grained IK segmentation (default mode).
			Analyzer analyzer = new IKAnalyzer();
			MultiFieldQueryParser multiParser = new MultiFieldQueryParser(Version.LUCENE_4_9, SEARCH_FIELDS, analyzer);
			// Tokenize the incoming keyword into a query.
			Query query = multiParser.parse(keyWord);
			ScoreDoc[] hits = searcher.search(query, MAX_HITS).scoreDocs;
			System.out.println(hits.length);
			idSet = new TreeSet<String>();
			// Collect the goodsId of every matching document.
			for (ScoreDoc sd : hits) {
				idSet.add(searcher.doc(sd.doc).get("goodsId"));
			}
		} catch (Exception e) {
			e.printStackTrace();
			System.out.println("索引检索失败");
		}
		return idSet;
	}

	/**
	 * Searches the goods index and returns only the goodsId values that fall on
	 * the requested page; also writes the total hit and page counts back into
	 * {@code page}.
	 *
	 * @param page pagination state; {@code getSearchInfo()} must contain a
	 *             "keyWord" entry, and {@code getPageSize()} must be positive
	 * @return sorted set of goodsId values on the requested page, or
	 *         {@code null} when the search failed
	 */
	public Set<String> searchByPage(PageUtil page) {
		Map<String, String> map = page.getSearchInfo();
		String keyWord = map.get("keyWord");
		int nowPage = page.getNowPage();
		int pageSize = page.getPageSize();

		Set<String> idSet = null;
		// try-with-resources: the directory and reader were previously leaked.
		try (FSDirectory dir = FSDirectory.open(new File(INDEX_PATH));
				DirectoryReader reader = DirectoryReader.open(dir)) {
			IndexSearcher searcher = new IndexSearcher(reader);
			// true = smart (coarse-grained) IK segmentation, unlike searchIndex().
			Analyzer analyzer = new IKAnalyzer(true);
			MultiFieldQueryParser multiParser = new MultiFieldQueryParser(Version.LUCENE_4_9, SEARCH_FIELDS, analyzer);
			// Tokenize the incoming keyword into a query.
			Query query = multiParser.parse(keyWord);
			ScoreDoc[] hits = searcher.search(query, MAX_HITS).scoreDocs;
			idSet = new TreeSet<String>();

			// Clamp the page window so nowPage <= 0 or an out-of-range page
			// cannot index outside hits[] (the original could start at a
			// negative index when nowPage was 0).
			int from = Math.max(0, (nowPage - 1) * pageSize);
			int to = Math.min(hits.length, nowPage * pageSize);
			for (int i = from; i < to; i++) {
				idSet.add(searcher.doc(hits[i].doc).get("goodsId"));
			}
			page.setTotalCount(hits.length);// total count is simply the hit count
			// Round up: a partial last page still counts as a full page.
			page.setTotalPage(hits.length % pageSize == 0 ? hits.length / pageSize : hits.length / pageSize + 1);
		} catch (Exception e) {
			e.printStackTrace();
			System.out.println("索引检索失败");
		}

		return idSet;
	}
}
