package com.ysq.excavator.lucene;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.ansj.lucene4.AnsjAnalysis;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import com.ysq.excavator.entity.LuceneField;
import com.ysq.excavator.entity.QueryAttribute;

/**
 * Lucene index manager.
 *
 * <p>Wraps a single filesystem-backed Lucene index and exposes search,
 * create/update and delete operations on it. Each public operation opens
 * and closes its own {@link IndexReader} or {@link IndexWriter}; call
 * {@link #close()} when the manager itself is no longer needed to release
 * the underlying {@link Directory}. Instances are NOT thread-safe.
 *
 * @author Administrator
 */
public class LuceneManager {
	private static final Logger logger = Logger.getLogger(LuceneManager.class);

	/** Names of every field encountered while reading query results. */
	public Set<String> fieldSet;
	/** Filesystem path of the index directory. */
	public String indexPath;

	Directory directory = null;

	IndexReader indexReader = null;

	IndexSearcher indexSearcher = null;
	IndexWriter indexWriter = null;

	/** Total hit count of the most recent {@link #luceneStrongQuery} call. */
	public int totalResult;

	/**
	 * Opens the index directory at the given filesystem path.
	 * The manager must be closed after use (see {@link #close()}).
	 *
	 * @param indexPath path of the Lucene index directory
	 * @throws Exception if the directory cannot be opened
	 */
	public LuceneManager(String indexPath) throws Exception {
		this.fieldSet = new HashSet<String>();
		this.indexPath = indexPath;
		this.directory = FSDirectory.open(new File(this.indexPath));
	}

	/**
	 * Creates an {@link IndexWriter} for the given directory, configured with
	 * the Ansj Chinese analyzer and the merge policy from {@link #optimizeIndex()}.
	 *
	 * @param dir the directory to write into
	 * @return a new writer in CREATE_OR_APPEND mode; the caller must close it
	 * @throws Exception if the writer cannot be created
	 */
	public static IndexWriter getWriter(Directory dir) throws Exception {
		// Ansj Chinese tokenizer
		Analyzer luceneAnalyzer = new AnsjAnalysis();

		IndexWriterConfig indexConf = new IndexWriterConfig(Version.LUCENE_45, luceneAnalyzer);
		// OpenMode.CREATE overwrites, OpenMode.APPEND appends,
		// OpenMode.CREATE_OR_APPEND creates the index if absent, else appends
		indexConf.setOpenMode(OpenMode.CREATE_OR_APPEND);

		// segment merge tuning
		indexConf.setMergePolicy(optimizeIndex());

		return new IndexWriter(dir, indexConf);
	}

	/**
	 * Builds a {@link Query} from the given query attributes.
	 *
	 * <p>Dispatches on the attribute's field type: TextField → analyzed
	 * {@link QueryParser} query, IntField/LongField → inclusive numeric range
	 * query ({@code keyword}..{@code endRange}), StringField → exact
	 * {@link TermQuery}. A null/blank attribute or an unknown type yields a
	 * {@link MatchAllDocsQuery}.
	 *
	 * @param queryAttr query attributes; may be null
	 * @return the built query, or null if building failed (error is logged)
	 */
	public Query getQuery(QueryAttribute queryAttr) {
		Query query = null;

		try {
			if (queryAttr == null) {
				query = new MatchAllDocsQuery();
			} else {
				String name = queryAttr.getFieldName();
				String type = queryAttr.getFieldType();
				String keyword = queryAttr.getKeyword();
				String endRange = queryAttr.getEndRange();

				if (name == null || "".equals(name) || type == null || "".equals(type) || keyword == null || "".equals(keyword)) {
					/** MatchAllDocsQuery **/
					query = new MatchAllDocsQuery();
				} else if ("TextField".equalsIgnoreCase(type)) {
					/** TextField query: analyzed full-text search **/
					AnsjAnalysis analyzer = new AnsjAnalysis();
					QueryParser titleParser = new QueryParser(Version.LUCENE_45, name, analyzer);
					query = titleParser.parse(keyword);
				} else if ("IntField".equalsIgnoreCase(type)) {
					/** IntField query: inclusive int range **/
					int range = Integer.parseInt(keyword);
					int rangeEnd = Integer.parseInt(endRange);
					query = NumericRangeQuery.newIntRange(name, range, rangeEnd, true, true);
				} else if ("StringField".equalsIgnoreCase(type)) {
					/** StringField query: exact (non-analyzed) term match **/
					Term term = new Term(name, keyword);
					query = new TermQuery(term);
				} else if ("LongField".equalsIgnoreCase(type)) {
					/** LongField query: inclusive long range **/
					long range = Long.parseLong(keyword);
					long rangeEnd = Long.parseLong(endRange);
					query = NumericRangeQuery.newLongRange(name, range, rangeEnd, true, true);
				} else {
					query = new MatchAllDocsQuery();
				}
			}
		} catch (Exception e) {
			// NOTE: a parse/number-format failure returns null; callers must
			// guard against a null query
			logger.error("getQuery error...", e);
		}
		return query;
	}

	/**
	 * Returns the segment-merge policy used when writing the index.
	 *
	 * @return a byte-size-based log merge policy tuned for batch indexing
	 */
	private static LogMergePolicy optimizeIndex() {
		LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();

		// Merge frequency while adding documents to a segment:
		// smaller values slow indexing, larger values (>10) speed up batch
		// indexing. Segments are merged once 50 accumulate.
		mergePolicy.setMergeFactor(50);

		// Maximum number of documents merged into a single segment:
		// smaller favors incremental indexing speed, larger favors batch
		// indexing and faster searches.
		mergePolicy.setMaxMergeDocs(5000);

		// Compound-file format (merging multiple segment files) is left at
		// the default.
		// mergePolicy.setUseCompoundFile(true);
		return mergePolicy;
	}

	/**
	 * Opens the reader and searcher needed for querying.
	 *
	 * @throws Exception if the index cannot be opened for reading
	 */
	public void initQueryTools() throws Exception {
		this.indexReader = DirectoryReader.open(this.directory);
		this.indexSearcher = new IndexSearcher(this.indexReader);
	}

	/**
	 * Runs a paged search and returns each hit as a field-name → field-value
	 * map. The "content" field is truncated to a 100-character preview.
	 * Updates {@link #totalResult} with the total hit count and records every
	 * field name seen into {@link #fieldSet}.
	 *
	 * @param query the query to execute (wrapped in a MUST boolean clause)
	 * @param start zero-based offset of the first hit to return
	 * @param limit maximum number of hits to return
	 * @return one map per returned document
	 * @throws Exception if the search fails
	 */
	public List<Map<String, Object>> luceneStrongQuery(Query query, int start, int limit) throws Exception {
		initQueryTools();

		List<Map<String, Object>> mapList = new ArrayList<Map<String, Object>>();

		try {
			BooleanQuery strongQuery = new BooleanQuery();
			strongQuery.add(query, Occur.MUST);

			// Paged search: collect the first start+limit hits, then slice out
			// the requested page.
			TopScoreDocCollector results = TopScoreDocCollector.create(start + limit, false);

			indexSearcher.search(strongQuery, results);

			TopDocs topDocs = results.topDocs(start, limit);

			for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
				// fetch the document by its internal id
				Document doc = indexSearcher.doc(scoreDoc.doc);

				Map<String, Object> documentMap = new HashMap<String, Object>();
				for (IndexableField field : doc.getFields()) {
					String fieldName = field.name();
					String fieldValue = field.stringValue();

					// truncate long content to a 100-char preview
					if ("content".equals(fieldName) && fieldValue.length() > 100) {
						fieldValue = fieldValue.substring(0, 100) + "...";
					}
					fieldSet.add(fieldName);

					documentMap.put(fieldName, fieldValue);
				}

				mapList.add(documentMap);
			}

			logger.debug("总共有文档:" + indexReader.maxDoc());

			logger.debug("查询出【" + topDocs.totalHits + "】条匹配结果");
			this.totalResult = topDocs.totalHits;
		} finally {
			// was previously skipped on exception, leaking the reader
			closeReader();
		}

		return mapList;
	}

	/**
	 * Deletes every document whose "contentid" field matches one of the given
	 * ids, then forces the deletions to be purged from disk.
	 *
	 * @param contentidList ids of the documents to delete
	 * @return true on success, false if any step failed (error is logged)
	 */
	public boolean deleteDocuments(List<Long> contentidList) {
		boolean flag = true;
		try {
			indexWriter = getWriter(directory);

			Term[] termArray = new Term[contentidList.size()];

			for (int i = 0; i < contentidList.size(); i++) {
				long contentid = contentidList.get(i);
				termArray[i] = new Term("contentid", contentid + "");
			}
			this.indexWriter.deleteDocuments(termArray);
			// must be called after deleting so the documents are fully
			// expunged from disk, not just marked deleted
			this.indexWriter.forceMergeDeletes();
		} catch (Exception e) {
			flag = false;
			logger.error("deleteDocuments error...", e);
		} finally {
			closeWriter();
		}
		return flag;
	}

	/**
	 * Counts documents whose "contentid" field exactly matches the given id
	 * (0 or 1 for a primary key).
	 *
	 * @param contentid the primary-key value to look up
	 * @return the number of matching documents, or 0 on error
	 */
	public int getContentCount(String contentid) {
		int result = 0;
		try {
			initQueryTools();

			Term term = new Term("contentid", contentid);
			TermQuery query = new TermQuery(term);

			// query the index; one hit is enough, totalHits carries the count
			TopDocs topDocs = indexSearcher.search(query, 1);

			result = topDocs.totalHits;
		} catch (Exception e) {
			logger.error("getContentCount error...", e);
		} finally {
			closeReader();
		}
		return result;
	}

	/**
	 * Inserts the document, or replaces the existing document with the same
	 * "contentid" (updateDocument = delete-by-term + add).
	 *
	 * @param document  the document to write
	 * @param contentid the primary-key value identifying the document
	 * @return true on success, false if indexing failed (error is logged)
	 */
	public boolean createOrUpdateIndex(Document document, String contentid) {
		boolean flag = true;

		long startTime = System.currentTimeMillis();

		try {
			// open the index for writing
			indexWriter = getWriter(directory);

			indexWriter.updateDocument(new Term("contentid", contentid), document);

		} catch (Exception e) {
			flag = false;
			logger.error("createOrUpdateIndex error...", e);
		} finally {
			closeWriter();
		}
		long endTime = System.currentTimeMillis();
		logger.debug("索引耗时： " + (endTime - startTime) + "毫秒，索引文件路径： " + indexPath);
		return flag;
	}

	/**
	 * Fetches the document with the given "contentid" and returns its stored
	 * fields as a list of {@link LuceneField} (field type is left empty).
	 *
	 * @param contentid the primary-key value to look up
	 * @return the document's fields, or an empty list if not found or on error
	 */
	public List<LuceneField> getDataById(String contentid) {
		List<LuceneField> fieldList = new ArrayList<LuceneField>();
		try {
			initQueryTools();

			Term term = new Term("contentid", contentid);
			TermQuery query = new TermQuery(term);

			// query the index for the single matching document
			TopDocs topDocs = indexSearcher.search(query, 1);

			int result = topDocs.totalHits;

			if (result != 1) {
				logger.warn("Error : recoverContentText get [ " + result + " ] result...");
			}
			for (ScoreDoc scoreDoc : topDocs.scoreDocs) {

				// fetch the document by its internal id
				Document doc = indexSearcher.doc(scoreDoc.doc);

				for (IndexableField field : doc.getFields()) {
					LuceneField luceneField = new LuceneField();

					luceneField.setFieldName(field.name());
					luceneField.setFieldValue(field.stringValue());
					luceneField.setFieldType("");

					fieldList.add(luceneField);
				}
			}
		} catch (Exception e) {
			logger.error("getDataById error...", e);
		} finally {
			closeReader();
		}
		return fieldList;
	}

	/**
	 * Commits and closes the current writer, if any.
	 */
	public void closeWriter() {
		if (this.indexWriter != null) {
			try {
				this.indexWriter.commit();
				this.indexWriter.close();
			} catch (IOException e) {
				logger.error("closeWriter error...", e);
			} finally {
				// drop the reference so a stale writer is never reused
				this.indexWriter = null;
			}
		}
	}

	/**
	 * Closes the current reader, if any.
	 */
	public void closeReader() {
		if (this.indexReader != null) {
			try {
				this.indexReader.close();
			} catch (IOException e) {
				logger.error("closeReader error...", e);
			} finally {
				// drop the reference so a stale reader is never reused
				this.indexReader = null;
				this.indexSearcher = null;
			}
		}
	}

	/**
	 * Releases the underlying index {@link Directory}. Call once when this
	 * manager is no longer needed.
	 */
	public void close() {
		if (this.directory != null) {
			try {
				this.directory.close();
			} catch (IOException e) {
				logger.error("close directory error...", e);
			} finally {
				this.directory = null;
			}
		}
	}

	public static void main(String[] args) {
		String indexPath = "D:/lucene/file5/index";

		try {
			LuceneManager lucene = new LuceneManager(indexPath);
			List<Long> contentidList = new ArrayList<Long>();

			for (int i = 1; i <= 20; i++) {
				long l = i;
				contentidList.add(l);
			}

			// lucene.deleteDocuments(contentidList);

			System.out.println("delete finidsed...");

			MatchAllDocsQuery allQuery = new MatchAllDocsQuery();

			lucene.luceneStrongQuery(allQuery, 0, 70);

		} catch (Exception e) {
			logger.error("main error...", e);
		}
	}

}
