/**  
* @Title: IndexDemo.java
* @Package com.lemon.conf.lucene
* @Description: (一句话阐述):
* @author Ran_cc
* @date 2018年6月1日
* @version V1.0  
*/
package com.lemon.conf.lucene;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.search.highlight.TokenSources;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.springframework.stereotype.Component;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.github.pagehelper.PageInfo;
import com.lemon.conf.MyException;
import com.lemon.utils.StringUtil;
/**
 * <p>Title: LuceneSearch</p>
 * <p>Description: Lucene index maintenance and paged, highlighted search.</p>
 * <p>Company: </p>
 * @author Ran_cc
 * @date 2018年6月1日
 * @version V1.0
 */
//@Component
public class LuceneSearch {

	// Filesystem path of the Lucene index directory.
	static String dir = "E:\\lucence";
	// IK analyzer (smart-segmentation mode). Shared by indexing AND querying so
	// index-time and query-time tokenization stay consistent; must not be
	// swapped for a different analyzer mid-flight.
	static Analyzer analyzer = new IKAnalyzer(true);
	static Directory directory = null;
	static IndexWriter indexWriter = null;

	/**
	 * Writes a single document into the index and commits it.
	 * I/O errors are logged to stderr and swallowed (best-effort indexing).
	 *
	 * @param doc the fully populated document to add to the index
	 */
	public static void write( Document doc ) {
		try {
			// Open the index storage directory.
			directory = FSDirectory.open(new File(dir));
			// Bind the Lucene version and the shared analyzer.
			IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_47, analyzer);
			indexWriter = new IndexWriter(directory, config);
			try {
				indexWriter.addDocument(doc);
				indexWriter.commit();
			} finally {
				// Close even when addDocument/commit throws, otherwise the
				// index write lock and directory handle are leaked.
				indexWriter.close();
				directory.close();
			}
			System.out.println("写入完成");
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Deletes every document whose {@code itemId} field equals the given key,
	 * then prints the elapsed time.
	 *
	 * @param str the exact itemId term identifying the documents to delete
	 * @throws Exception on any index I/O failure
	 */
	public static void delete( String str ) throws Exception {
		long startMillis = System.currentTimeMillis();
		directory = FSDirectory.open(new File(dir));
		// Keep the shared IK analyzer and the LUCENE_47 version used by
		// write()/searchItem(). The previous code replaced the static analyzer
		// with a StandardAnalyzer here, silently changing tokenization for all
		// later calls — and Term-based deletion never uses the analyzer anyway.
		IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_47, analyzer);
		indexWriter = new IndexWriter(directory, config);
		try {
			// Exact term match against the stored, untokenized itemId value.
			indexWriter.deleteDocuments(new Term("itemId", str));
		} finally {
			// Release the write lock / directory handle even on failure.
			indexWriter.close();
			directory.close();
		}
		System.out.println("删除索引耗时：" + (System.currentTimeMillis() - startMillis) + "ms\n");
	}

	/**
	 * <b>Description:<em><blockquote>
	 * Paged, highlighted item search over the index.
	 * </blockquote></em></b>
	 *
	 * Parses {@code value} against {@code field}, optionally ANDs in a
	 * {@code type} filter from {@code searchType}, and returns one page of
	 * hits with the matched field wrapped in highlight markup.
	 *
	 * @Title: search
	 * @param field field name to query and highlight
	 * @param value user-entered query string
	 * @param page 1-based page number; values &lt;= 0 are treated as 1
	 * @param length page size; clamped to (0, 20]
	 * @param searchType optional filters — only the "type" key is consulted
	 * @param sortName sort field name (currently unused; kept for API compatibility)
	 * @return a PageInfo whose list holds one Map per hit
	 *         (keys: field, amount, title, itemId, createTime, type)
	 * @throws Exception on index I/O or query-parse failure
	 * @author Ran_cc
	 * @date 2018年6月2日
	 */
	@SuppressWarnings( { "rawtypes", "unchecked" } )
	public static PageInfo searchItem( String field, String value, int page, int length, Map<String, Object> searchType, String sortName ) throws Exception {
		// Open the index for reading.
		directory = FSDirectory.open(new File(dir));
		DirectoryReader ireader = DirectoryReader.open(directory);
		try {
			IndexSearcher isearcher = new IndexSearcher(ireader);
			// Query parser bound to the requested field and the shared analyzer.
			QueryParser parser = new QueryParser(Version.LUCENE_47, field, analyzer);

			// Conjunction of the main query and any optional type filter.
			BooleanQuery booleanQuery = new BooleanQuery();
			Query query1 = parser.parse(value);
			// Wrap matched terms in red-bold markup for display.
			SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter("<b><font color=red>", "</font></b>");
			Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query1));
			booleanQuery.add(query1, Occur.MUST);

			if ( !StringUtil.isEmpty(searchType.get("type")) ) {
				Query query2 = new QueryParser(Version.LUCENE_47, "type", analyzer).parse(searchType.get("type").toString());
				booleanQuery.add(query2, Occur.MUST);
			}

			// Normalize paging inputs BEFORE computing the slice offset. The
			// previous code derived `start` from the raw page/length first, so
			// page <= 0 or a bad length produced a negative/wrong offset.
			if ( page <= 0 ) {
				page = 1;
			}
			if ( length <= 0 || length > 20 ) {
				length = 20;
			}
			int start = (page - 1) * length;
			int hm = start + length;

			// Collect enough top docs to cover the requested page, then slice.
			TopScoreDocCollector res = TopScoreDocCollector.create(hm, true);
			isearcher.search(booleanQuery, res);
			int amount = res.getTotalHits();
			TopDocs tds = res.topDocs(start, length);

			PageInfo pageInfo = new PageInfo();
			pageInfo.setSize(amount);
			pageInfo.setFirstPage(1);
			// Ceiling division: partial last page still counts as a page.
			pageInfo.setPages(amount % length != 0 ? amount / length + 1 : amount / length);
			pageInfo.setPageNum(page);
			pageInfo.setTotal(amount);

			ScoreDoc [] hits = tds.scoreDocs;
			List<Map> list = new ArrayList<Map>();
			for (ScoreDoc hit : hits) {
				int id = hit.doc;
				Document hitDoc = isearcher.doc(id);

				Map map = new HashMap();
				map.put(field, hitDoc.get(field));
				map.put("amount", amount); // total hit count, repeated per row
				String foodname = hitDoc.get(field);
				// Re-tokenize the stored field so the highlighter can wrap the
				// fragments that matched the query.
				TokenStream tokenStream = TokenSources.getAnyTokenStream(isearcher.getIndexReader(), id, field, analyzer);
				TextFragment [] frag = highlighter.getBestTextFragments(tokenStream, foodname, false, 10);
				String foodValue = "";
				for (TextFragment f : frag) {
					if ( f != null && f.getScore() > 0 ) {
						// Keep the last scoring fragment as the display title.
						foodValue = f.toString();
					}
				}
				map.put("title", foodValue);
				map.put("itemId", hitDoc.get("itemId"));
				map.put("createTime", hitDoc.get("createTime"));
				map.put("type", hitDoc.get("type"));
				list.add(map);
			}
			pageInfo.setList(list);
			return pageInfo;
		} finally {
			// Close reader/directory on every path, not just on success.
			ireader.close();
			directory.close();
		}
	}

	/**
	 * Ad-hoc entry point: deletes the index entries for one hard-coded itemId.
	 */
	public static void main( String [] args ) {
		try {
			delete("36310719212");
		} catch (Exception e) {
			// Route the failure to the application's error log file.
			MyException.writeErr(e);
		}
	}
}
