package com.omall.search.service.impl;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Map.Entry;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.wltea.analyzer.lucene.IKAnalyzer;
import com.omall.search.bean.KeyWordsTool;
import com.omall.search.bean.Search;
import com.omall.search.dao.BaseDao;
import com.omall.search.service.LuceneService;
import com.omall.search.util.ContentExtractor;
import com.omall.search.util.JSONUtil;
import com.omall.search.util.PropertiesUtil;
import com.omall.search.util.ReflectObject;

/**
 *  Lucene 服务实现 (Lucene service implementation).
 *
 *  Responsibilities:
 *    1. build indexes from database content ({@link #buildIndex()});
 *    2. empty existing indexes ({@link #emptyIndex()});
 *    3. query indexes with pagination, sorting and hit highlighting
 *       ({@link #queryFromIndex}, {@link #paginationQuery});
 *    4. open searchers over one or several index directories.
 *
 * @ClassName:LuceneService
 * @date: 2014-10-23下午04:40:42
 * @author: 谢洪飞
 * @version: V1.0
 */
//@Service(LuceneService.LUCENESERVICE)
@Service("LuceneService")
public class LuceneServiceImpl implements LuceneService {

	// NOTE(review): the logger category is a file name; conventionally this would be
	// Logger.getLogger(LuceneServiceImpl.class). Kept as-is because external log4j
	// configuration may reference the "settlog.txt" category.
	public Logger logger = Logger.getLogger("settlog.txt");

	@Autowired
	private BaseDao baseDao;

	/** Non-smart IK analyzer (false): finest-grained word segmentation. */
	private Analyzer analyzer = new IKAnalyzer(false);

	/** Sentinel price indexed when a row has no parsable price value. */
	private static final double DEFAULT_PRICE = 9999999.99d;

	/**
	 * Builds one Lucene index per configured statement name.
	 *
	 * Reads {@code statementName} (comma-separated MyBatis statement ids) and
	 * {@code indexPath} from search.properties, queries each statement through
	 * {@link BaseDao#queryForList}, and writes one index directory named
	 * {@code <indexPath><statementName>_lucene} per statement.
	 *
	 * Fix: the writer/directory are now released in a finally block, so a failure
	 * while indexing one statement no longer leaks the index lock; a malformed
	 * price value falls back to {@link #DEFAULT_PRICE} instead of aborting the
	 * whole build with a NumberFormatException.
	 *
	 * @throws Exception on I/O or database failure
	 */
	@SuppressWarnings("unchecked")
	@Override
	public void buildIndex() throws Exception {

		// 1. Load search.properties.
		String path = PropertiesUtil.path();
		Properties prop = PropertiesUtil.getProperties(path + "search.properties");

		// 2. Statement ids (match Mapper ids) and the index root directory.
		String[] statementNames = prop.getProperty("statementName").trim().split(",");
		String lucenePathPrefix = prop.getProperty("indexPath").trim();

		// 3. Query each statement and index the rows.
		if (null != statementNames && statementNames.length > 0) {
			for (String statementName : statementNames) {
				IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_47, analyzer);

				// Rows to index for this statement.
				List<Search> list = baseDao.queryForList(statementName);
				// Target index directory for this statement.
				String filePath = lucenePathPrefix + statementName + "_lucene";
				Directory directory = FSDirectory.open(new File(filePath));
				IndexWriter indexWriter = new IndexWriter(directory, iwConfig);
				try {
					for (Search search : list) {
						logger.debug("!------ 创建索引开始!" + new Date());
						logger.info("|--- 开始创建 " + statementName + "_lucene 索引 ---|");
						Document doc = new Document();
						// Reflect the bean into field-name -> value pairs.
						Map<String, Object> searchMap = ReflectObject.reflectFromObject(search);
						for (Entry<String, Object> entry : searchMap.entrySet()) {
							String key = String.valueOf(entry.getKey());
							Object value = entry.getValue();
							// Numeric companion field so price range/sort queries work.
							// (Original also compared "price"==key — a redundant reference
							// comparison on Strings; equals alone is correct.)
							if ("price".equals(key)) {
								doc.add(new Field("price",
										new NumericTokenStream().setDoubleValue(parsePrice(value))));
							}
							// Text field for full-text search + stored retrieval.
							doc.add(new TextField(key,
									ContentExtractor.extract(String.valueOf(value)), Store.YES));
						}
						indexWriter.addDocument(doc);
					}
				} finally {
					// Always release the writer, directory and index lock.
					releaseIndexWriter(indexWriter, directory);
				}
			}
		}
	}

	/**
	 * Parses a price value, falling back to {@link #DEFAULT_PRICE} when the value
	 * is null or not a valid double (the original crashed the whole build here).
	 */
	private static double parsePrice(Object value) {
		if (value == null) {
			return DEFAULT_PRICE;
		}
		try {
			return Double.parseDouble(value.toString());
		} catch (NumberFormatException e) {
			return DEFAULT_PRICE;
		}
	}

	/**
	 * Empties every configured index (scheduled task).
	 *
	 * Fix: removed the deprecated, unused {@code IndexReader.open(...)} (it was
	 * never read from and leaked if writer creation threw); the writer is now
	 * released in a finally block.
	 *
	 * @throws Exception on I/O failure
	 */
	@Override
	public void emptyIndex() throws Exception {

		// 1. Read configuration.
		String path = PropertiesUtil.path();
		Properties prop = PropertiesUtil.getProperties(path + "search.properties");
		String[] statementNames = prop.getProperty("statementName").trim().split(",");
		String indexPathPrefix = prop.getProperty("indexPath").trim();

		// 2. Delete all documents from each existing index directory.
		if (null != statementNames && statementNames.length > 0) {
			for (String statementName : statementNames) {
				File indexFile = new File(indexPathPrefix + statementName + "_lucene");
				if (indexFile.exists()) {
					Directory directory = FSDirectory.open(indexFile);
					IndexWriterConfig indexWriterConfig =
							new IndexWriterConfig(Version.LUCENE_47, analyzer);
					IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);
					try {
						// Remove every document in indexPathPrefix + statementName + _lucene.
						indexWriter.deleteAll();
					} finally {
						releaseIndexWriter(indexWriter, directory);
					}
				}
			}
		}
	}

	/**
	 * Searches the index for the given keywords with optional price-range filter
	 * and sort criteria, returning one page of highlighted results.
	 *
	 * Fix: all collected sort criteria are now applied (the original built the
	 * Sort from {@code sorts.get(0)} only), and an unrecognized priceSort code no
	 * longer adds a null SortField (which caused an NPE in {@code new Sort(...)}).
	 *
	 * @param indexSearcher searcher over the target index(es)
	 * @param keyWords      JSON payload deserialized into {@link KeyWordsTool}
	 * @param fields        index fields to search
	 * @param page          1-based page number
	 * @param pageSize      results per page
	 * @return one page of highlighted results (empty list when nothing matches)
	 * @throws Exception on parse or I/O failure
	 */
	public List<Search> queryFromIndex(IndexSearcher indexSearcher
			                          , String keyWords , String [] fields
			                          ,int page ,int pageSize)
			                                                     throws Exception{

		// Multi-field parser; terms separated by whitespace are OR-ed together.
		QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_47, fields, analyzer);
		parser.setDefaultOperator(QueryParser.OR_OPERATOR);

		KeyWordsTool keywords =
				(KeyWordsTool) JSONUtil.convertToObject(KeyWordsTool.class, keyWords);

		BooleanQuery mutilQuery = new BooleanQuery();

		// Optional price-range filter on the numeric "price" field.
		if (StringUtils.isNotBlank(keywords.getPriceRangeSort())) {
			Query priceRangeQuery = NumericRangeQuery.newDoubleRange("price",
					keywords.getPriceRangeMin() == null
							? new Double("0.00") : new Double(keywords.getPriceRangeMin()),
					keywords.getPriceRangeMax() == null
							? new Double("10000000.00") : new Double(keywords.getPriceRangeMax()),
					true, true);
			mutilQuery.add(priceRangeQuery, BooleanClause.Occur.MUST);
		}

		mutilQuery.add(parser.parse(keywords.getKeyWords()), BooleanClause.Occur.MUST);

		// Collect the requested sort criteria in priority order.
		List<SortField> sorts = new ArrayList<SortField>();

		// Sales-count sort (descending).
		if (StringUtils.isNotBlank(keywords.getSaleCountSort())) {
			sorts.add(new SortField("saleCount", SortField.Type.INT, true));
		}
		// Price sort: codes 1/3 ascending, 2/4 descending; anything else ignored.
		if (StringUtils.isNotBlank(keywords.getPriceSort())) {
			switch (Integer.parseInt(keywords.getPriceSort())) {
				case 1:
				case 3:
					sorts.add(new SortField("price", SortField.Type.DOUBLE, false));
					break;
				case 2:
				case 4:
					sorts.add(new SortField("price", SortField.Type.DOUBLE, true));
					break;
				default:
					break;
			}
		}
		// Release-date sort (newest first).
		if (StringUtils.isNotBlank(keywords.getSaleDateSort())) {
			sorts.add(new SortField("saleDate", SortField.Type.STRING, true));
		}

		Sort sort = sorts.isEmpty()
				? null : new Sort(sorts.toArray(new SortField[sorts.size()]));

		return paginationQuery(analyzer, indexSearcher, mutilQuery, sort, page, pageSize);
	}

	/**
	 * Runs the query, slices out one page of hits and highlights the
	 * content/title/storeName fields.
	 *
	 * Fix: the collector now gathers {@code currentPage * pageSize} hits. The
	 * original collected only {@code currentPage + pageSize}, so for any page
	 * beyond the first the slice started past the collected hits and pages came
	 * back empty or truncated. The fragmenter is also configured once instead of
	 * per hit.
	 *
	 * @param analyzer      analyzer used for highlighting token streams
	 * @param indexSearcher searcher to run the query against
	 * @param query         combined boolean query
	 * @param sort          sort criteria, or null for relevance order
	 * @param currentPage   1-based page number
	 * @param pageSize      results per page
	 * @return highlighted results for the requested page
	 * @throws Exception on I/O or highlighting failure
	 */
	public List<Search> paginationQuery( Analyzer analyzer ,
			                             IndexSearcher indexSearcher ,
			                             BooleanQuery query , Sort sort ,
			                             int currentPage , int pageSize ) throws Exception
	{

		List<Search> searchResults = new ArrayList<Search>();

		// Collect enough hits to cover the requested page, then slice the page off.
		// NOTE(review): assumes currentPage >= 1 — a 0 page would give a negative start.
		int numHits = currentPage * pageSize;
		int start = numHits - pageSize;

		ScoreDoc[] doc;
		if (null != sort) {
			// fillFields/trackDocScores/trackMaxScore/docsScoredInOrder all false:
			// we only need doc ids in sort order, not scores.
			TopFieldCollector results =
					TopFieldCollector.create(sort, numHits, false, false, false, false);
			indexSearcher.search(query, results);
			TopDocs topDocs = results.topDocs(start);
			if (topDocs.totalHits <= 0) {
				return searchResults;
			}
			doc = topDocs.scoreDocs;
		} else {
			TopScoreDocCollector results = TopScoreDocCollector.create(numHits, false);
			indexSearcher.search(query, results);
			TopDocs topDocs = results.topDocs(start);
			if (topDocs.totalHits <= 0) {
				return searchResults;
			}
			doc = topDocs.scoreDocs;
		}

		// Highlight markup tags come from search.properties (startTag/endTag).
		Properties prop = PropertiesUtil.getProperties(PropertiesUtil.path() + "search.properties");
		SimpleHTMLFormatter simpleHTMLFormatter =
				new SimpleHTMLFormatter(prop.getProperty("startTag"), prop.getProperty("endTag"));
		Highlighter highlighter = new Highlighter(simpleHTMLFormatter, new QueryScorer(query));
		// Max length of each highlighted fragment.
		highlighter.setTextFragmenter(new SimpleFragmenter(200));

		for (int i = 0; i < doc.length; i++) {
			Document document = indexSearcher.doc(doc[i].doc);

			// Best highlighted fragment per displayed field (null when no term matched).
			String content = highlight(highlighter, analyzer, document, "content");
			String title = highlight(highlighter, analyzer, document, "title");
			String storeName = highlight(highlighter, analyzer, document, "storeName");

			Map<String, String> map = new HashMap<String, String>();
			for (IndexableField field : document.getFields()) {
				// Prefer the highlighted text; fall back to the stored value.
				if ("content".equals(field.name())) {
					map.put(field.name(), content != null ? content : safeGet(document, "content"));
				} else if ("title".equals(field.name())) {
					map.put(field.name(), title != null ? title : safeGet(document, "title"));
				} else if ("storename".equalsIgnoreCase(field.name())) {
					map.put(field.name(), storeName != null ? storeName : safeGet(document, "storeName"));
				} else {
					map.put(field.name(), field.stringValue());
				}
			}
			searchResults.add((Search) ReflectObject.TconvertMap(Search.class, map));
		}

		return searchResults;
	}

	/**
	 * Returns the best highlighted fragment of {@code fieldName} in the document,
	 * or null when the highlighter finds no matching term.
	 */
	private String highlight(Highlighter highlighter, Analyzer analyzer,
			Document document, String fieldName) throws Exception {
		String raw = safeGet(document, fieldName);
		TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(raw));
		return highlighter.getBestFragment(tokenStream, raw);
	}

	/** Returns the stored value of {@code fieldName}, or "" when absent. */
	private static String safeGet(Document document, String fieldName) {
		String value = document.get(fieldName);
		return value == null ? "" : value;
	}

	/**
	 * Opens an IndexSearcher over the single index directory named
	 * {@code <indexPath><fields>_lucene}.
	 *
	 * Fix: uses {@code DirectoryReader.open} (as {@link #getSearchers} already
	 * does) instead of the deprecated {@code IndexReader.open}.
	 *
	 * NOTE(review): the reader/directory stay open for the searcher's lifetime;
	 * closing is left to the caller, as in the original.
	 *
	 * @param fields statement name identifying the index directory
	 * @return a searcher over that index
	 * @throws IOException when the index cannot be opened
	 */
	@Override
	public IndexSearcher getSearcher(String  fields) throws IOException{

		// Resolve the index directory from configuration.
		String indexPath =
			PropertiesUtil.getProperties(PropertiesUtil.path()
			+"search.properties").getProperty("indexPath").trim()+fields+"_lucene";

		Directory directory = FSDirectory.open(new File(indexPath));
		IndexReader indexReader = DirectoryReader.open(directory);
		return new IndexSearcher(indexReader);
	}

	/**
	 * Opens one IndexSearcher over several index directories at once.
	 *
	 * @param fields comma-separated statement names, each mapping to a directory
	 *               named {@code <indexPath><name>_lucene}
	 * @return a searcher over a MultiReader that owns (and will close) the sub-readers
	 * @throws IOException when any index cannot be opened
	 */
	@Override
	public IndexSearcher getSearchers(String fields) throws IOException {

		String[] fieldsArr = fields.split(",");
		IndexReader[] indexReaders = new IndexReader[fieldsArr.length];

		int index = 0;
		for (String path : fieldsArr) {
			String indexPath =
					PropertiesUtil.getProperties(PropertiesUtil.path()
					+ "search.properties").getProperty("indexPath").trim() + path + "_lucene";

			Directory directory = FSDirectory.open(new File(indexPath));
			indexReaders[index++] = DirectoryReader.open(directory);
		}

		// closeSubReaders=true: the MultiReader closes the sub-readers when closed.
		return new IndexSearcher(new MultiReader(indexReaders, true));
	}

	/**
	 * Counts the hits for the given keywords (same query shape as
	 * {@link #queryFromIndex}, without sorting or highlighting).
	 *
	 * Fix: static constant accessed via the class, dropping the
	 * {@code @SuppressWarnings("static-access")} workaround.
	 *
	 * @param indexSearcher searcher over the target index(es)
	 * @param keyWords      JSON payload deserialized into {@link KeyWordsTool}
	 * @param f             index fields to search
	 * @return total number of matching documents
	 * @throws Exception on parse or I/O failure
	 */
	@Override
	public int count(IndexSearcher indexSearcher , String keyWords , String [] f) throws Exception
	{
		QueryParser queryParser = new MultiFieldQueryParser(Version.LUCENE_47, f, analyzer);
		queryParser.setDefaultOperator(QueryParser.OR_OPERATOR);

		KeyWordsTool keywords = JSONUtil.convertToObject(KeyWordsTool.class, keyWords);
		Query query = queryParser.parse(keywords.getKeyWords());

		BooleanQuery mutilQuery = new BooleanQuery();
		mutilQuery.add(query, BooleanClause.Occur.MUST);

		// Optional price-range filter, mirroring queryFromIndex.
		if (StringUtils.isNotBlank(keywords.getPriceRangeSort())) {
			Query priceRangeQuery = NumericRangeQuery.newDoubleRange("price",
					keywords.getPriceRangeMin() == null
							? new Double("0.00") : new Double(keywords.getPriceRangeMin()),
					keywords.getPriceRangeMax() == null
							? new Double("10000000.00") : new Double(keywords.getPriceRangeMax()),
					true, true);
			mutilQuery.add(priceRangeQuery, BooleanClause.Occur.MUST);
		}

		// totalHits is the full match count regardless of the collection limit.
		TopDocs topDocs = indexSearcher.search(mutilQuery, 10000);
		return topDocs.totalHits;
	}

	/**
	 * Releases a writer and its directory: closes the writer, force-unlocks the
	 * directory if the lock is still held, then closes the directory.
	 *
	 * Fix: the Directory itself is now closed — the original leaked one file
	 * handle per build/empty cycle. Failures are logged but never propagated,
	 * matching the original best-effort contract.
	 *
	 * @param indexWriter writer to close (may be null)
	 * @param directory   directory to unlock and close (may be null)
	 */
	private void releaseIndexWriter(IndexWriter indexWriter , Directory directory){

		try {
			if (null != indexWriter) {
				indexWriter.close();
			}
		}
		catch (Exception e) {
			logger.info("释放 IndexWriter对象发生异常!");
			e.printStackTrace();
		}
		finally {
			try {
				// Closing the writer normally releases the lock; force-unlock
				// covers the case where close() itself failed.
				if (null != directory && IndexWriter.isLocked(directory)) {
					IndexWriter.unlock(directory);
				}
				if (null != directory) {
					directory.close();
				}
			}
			catch (Exception e2) {
				logger.info("释放 IndexWriter对象对"+directory+"的锁定发生异常!");
				e2.printStackTrace();
			}
		}
	}

}
