package com.liusy.serachengine;



import java.io.IOException;


import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;

import com.liusy.taskengine.executemodel.AbstractExecuteModel;





/**
 * 
 * 单例 总搜索器
 * 
 * @author liusy 2013-02-19
 *
 */
public class SerachIndex {

	// BUG FIX: the logger was created for AbstractExecuteModel.class, so every
	// log line from this class was attributed to the wrong category.
	private static Logger log = Logger.getLogger(SerachIndex.class);

	// Root directory that index folders live under.
	// NOTE(review): hard-coded Windows separator — assumes this runs on Windows.
	private String indexPath = System.getProperty("user.dir") + "\\index\\";

	// Registered index models, keyed by the last path segment of their index directory.
	private Map<String, IIndexModel> map = new HashMap<String, IIndexModel>();

	// Markup wrapped around highlighted fragments (used by searchTalent's Highlighter).
	private String prefixHTML = "<font class=\"highlight\">";

	private String suffixHTML = "</font>";

	// Document-to-Map converter used for multi-directory search; not fully implemented yet.
	private IConvertModel docConvertMap = new FileConvertModel(indexPath);

	// "Singleton" held per thread. NOTE(review): a ThreadLocal gives one instance
	// PER THREAD, not one per JVM — confirm this is the intended scope.
	private static ThreadLocal<SerachIndex> serachIndex = new ThreadLocal<SerachIndex>();

	public SerachIndex() {
	}


	public IConvertModel getDocConvertMap() {
		return docConvertMap;
	}


	public void setDocConvertMap(IConvertModel docConvertMap) {
		this.docConvertMap = docConvertMap;
	}


	/**
	 * Returns the per-thread instance, creating it lazily on first access.
	 *
	 * @return this thread's {@code SerachIndex} instance, never {@code null}
	 */
	public static synchronized SerachIndex getSerachIndex()
	{
		if (null == serachIndex.get())
			serachIndex.set(new SerachIndex());
		return serachIndex.get();
	}


	/**
	 * Registers an index model under the last segment of its index path.
	 * NOTE(review): splits on {@code "\\"}, so registration only works for
	 * Windows-style paths — confirm before running on other platforms.
	 *
	 * @param indexModel the model to register; its index path must be non-null
	 */
	public void putIndexModel(IIndexModel indexModel) {
		String path = indexModel.getIndexPath();
		// Key by the directory name after the final backslash.
		String key = path.substring(path.lastIndexOf("\\") + 1, path.length());
		map.put(key, indexModel);
	}


	/**
	 * Looks up a registered index model by its key (last path segment).
	 *
	 * @param indexPath registration key
	 * @return the model, or {@code null} if none is registered under that key
	 */
	public IIndexModel getIndexModel(String indexPath) {
		return map.get(indexPath);
	}


	/** Creates (rebuilds) the index registered under {@code indexPath}. */
	public void createIndex(String indexPath, IConvertModel cm) {
		CrateOrupdateIndex(indexPath, cm, true);
	}


	/** Incrementally updates the index registered under {@code indexPath}. */
	public void UpdateIndex(String indexPath, IConvertModel cm) {
		CrateOrupdateIndex(indexPath, cm, false);
	}

	/**
	 * Creates or updates an index; silently does nothing if the key is unknown.
	 *
	 * @param indexPath registration key of the target index
	 * @param cm        converter supplying documents
	 * @param create    {@code true} to rebuild from scratch, {@code false} to append
	 */
	public void CrateOrupdateIndex(String indexPath, IConvertModel cm, boolean create)
	{
		if (!map.containsKey(indexPath))
			return;
		IIndexModel indexModel = map.get(indexPath);
		indexModel.CreateIndex(cm, create);
	}


	/** Asks the model registered under {@code indexPath} to reopen its searcher. */
	private void notifyUpdateIndexSerarch(String indexPath)
	{
		IIndexModel indexModel = map.get(indexPath);
		indexModel.UpdateIndexSearcher();
	}

	/**
	 * Searches a single index directory and returns one page of results.
	 *
	 * @param indexPath registration key of the index to search
	 * @param queryStr  raw query string (quoted phrase or space-separated terms)
	 * @param pageNo    1-based page number
	 * @param pageSize  results per page
	 * @return page map with keys PageNo/pageSize/TotalNum/data, or {@code null}
	 *         if no index is registered under {@code indexPath}
	 */
	public Map<String, Object> SimpleSearcher(String indexPath, String queryStr, int pageNo, int pageSize) {
		if (!map.containsKey(indexPath))
			return null;
		IIndexModel indexModel = map.get(indexPath);
		Analyzer analyzer = indexModel.getAnalyzer();
		IndexSearcher searcher = indexModel.getIndexSearcher();
		this.docConvertMap = indexModel.getConvertModel();
		Query query = getQuery(queryStr, analyzer);
		Map<String, Object> mapPage = SearchPage(query, searcher, pageNo, pageSize);
		return mapPage;
	}

	/**
	 * Multi-directory search. Not implemented yet.
	 *
	 * @return always {@code null}
	 */
	public Map<String, Object> MultiSearcher(String queryStr, int pageNo, int pageSize) {
		// MultiSearcher searcher = new MultiSearcher(searchers);
		return null;
	}

	/**
	 * Parallel multi-directory search (one thread per Searchable). Not implemented yet.
	 *
	 * @return always {@code null}
	 */
	public Map<String, Object> ParallelMultiSearcher(String queryStr, int pageNo, int pageSize) {
		// ParallelMultiSearcher searcher = new ParallelMultiSearcher(searchers);
		return null;
	}


	/**
	 * Remote parallel multi-directory search. Not implemented yet.
	 *
	 * @return always {@code null}
	 */
	public Map<String, Object> RemoteParallelMultiSearcher(String queryStr, int pageNo, int pageSize) {
		return null;
	}

	/**
	 * Remote single-directory search. Not implemented yet.
	 *
	 * @return always {@code null}
	 */
	public Map<String, Object> RemoteSimpleSearcher(String queryStr, int pageNo, int pageSize) {
		return null;
	}

	/**
	 * Builds a Lucene query from the raw query string.
	 * A string wrapped in double quotes is parsed as an exact phrase
	 * (slop 0 via {@code ~0}); otherwise the string is split on spaces and
	 * every term MUST match the "content" field (IK multi-field parse).
	 *
	 * @param queryStr raw user query
	 * @param analyzer analyzer used for phrase parsing
	 * @return the parsed query, or {@code null} if parsing failed
	 */
	private Query getQuery(String queryStr, Analyzer analyzer) {
		Query query = null;
		try {
			if (queryStr.length() > 0 && queryStr.substring(0, 1).equals("\"")
					&& queryStr.substring(queryStr.length() - 1, queryStr.length()).equals("\"")) {
				// Quoted input: exact phrase search on the "content" field.
				QueryParser parser = new QueryParser(Version.LUCENE_43, "content", analyzer);
				query = parser.parse(queryStr + "~0");
			} else {
				// Unquoted input: AND together every space-separated term.
				String[] strarray = queryStr.split(" ");
				String[] key = new String[strarray.length];
				BooleanClause.Occur[] flags = new BooleanClause.Occur[strarray.length];
				for (int i = 0; i < strarray.length; i++) {
					key[i] = "content";
					flags[i] = BooleanClause.Occur.MUST;
				}
				query = IKQueryParser.parseMultiField(key, strarray, flags);
			}
		} catch (Exception e) {
			// BUG FIX: was e.printStackTrace(); route through the class logger.
			log.error("Failed to build query from: " + queryStr, e);
		}
		return query;
	}

	/**
	 * Runs a paged search using searchAfter-style pagination.
	 *
	 * BUG FIX: the original collected only the top 10 hits
	 * ({@code searcher.search(query, 10)}) but then indexed
	 * {@code scoreDocs[(pageNo-1)*pageSize - 1]} — any page whose offset
	 * exceeded 10 threw an uncaught ArrayIndexOutOfBoundsException. We now
	 * collect enough hits to cover the requested page and bounds-check the
	 * offset, returning an empty page when it is past the end of the results.
	 *
	 * @param query    the query to execute
	 * @param searcher searcher to run it against
	 * @param pageNo   1-based page number
	 * @param pageSize results per page
	 * @return map with keys PageNo/pageSize/TotalNum/data
	 */
	@SuppressWarnings("unchecked")
	private Map<String, Object> SearchPage(Query query, IndexSearcher searcher, int pageNo, int pageSize) {
		Map<String, Object> mapPage = new HashMap<String, Object>();
		List<Map<String, Object>> searchlist = new ArrayList<Map<String, Object>>();
		try {
			// Collect enough hits to reach the start of the requested page.
			int index = (pageNo - 1) * pageSize;
			TopDocs result = searcher.search(query, Math.max(index, 1));
			ScoreDoc scoreDoc = null;
			if (index > 0) {
				if (index - 1 < result.scoreDocs.length) {
					// Last document of the previous page; searchAfter resumes here.
					scoreDoc = result.scoreDocs[index - 1];
				} else {
					// Requested page starts past the last hit: empty page, real total.
					mapPage.put("PageNo", pageNo);
					mapPage.put("pageSize", pageSize);
					mapPage.put("TotalNum", result.totalHits);
					mapPage.put("data", searchlist);
					return mapPage;
				}
			}
			// Fetch this page's documents (scoreDoc == null means "from the top").
			TopDocs hits = searcher.searchAfter(scoreDoc, query, pageSize);
			int totalNum = hits.totalHits;
			log.info(hits.totalHits);
			// Materialize each hit and convert it to the page's row representation.
			for (int i = 0; i < hits.scoreDocs.length; i++) {
				ScoreDoc sdoc = hits.scoreDocs[i];
				Document doc = searcher.doc(sdoc.doc);
				searchlist.add((Map<String, Object>) docConvertMap.DocConvertMap(doc));
			}
			mapPage.put("PageNo", pageNo);
			mapPage.put("pageSize", pageSize);
			mapPage.put("TotalNum", totalNum);
			mapPage.put("data", searchlist);
		} catch (IOException e) {
			// BUG FIX: was e.printStackTrace(); route through the class logger.
			log.error("Paged search failed", e);
		}
		return mapPage;
	}


	/**
	 * Fetches all hits for {@code query} and prints/collects one page of them.
	 * Sets up a Highlighter for result markup (highlighting itself is still
	 * commented out). Collected rows are currently discarded (return is void).
	 *
	 * BUG FIXES:
	 * - The original "last page" branch looped to
	 *   {@code start + (hits.length % pageSize)}, which reads past the end of
	 *   the hit array whenever the requested page starts beyond the result
	 *   set. Both branches are now the single bound
	 *   {@code min(start + pageSize, hits.length)}, identical on all in-range
	 *   inputs and safe on out-of-range pages.
	 * - The original swallowed every exception in an empty catch; failures
	 *   are now logged.
	 *
	 * @param query      the query to execute
	 * @param indexModel model supplying the searcher, analyzer and converter
	 * @param pageNo     1-based page number
	 * @param pageSize   results per page
	 */
	@SuppressWarnings("unchecked")
	public void searchTalent(Query query, IIndexModel indexModel, int pageNo, int pageSize) {

		IndexSearcher searcher = indexModel.getIndexSearcher();
		Analyzer analyzer = indexModel.getAnalyzer();
		Document doc = null;
		List<Map<String, Object>> searchlist = new ArrayList<Map<String, Object>>();

		try {
			// Highlighter wraps matched fragments in prefixHTML/suffixHTML,
			// clipped to 50-character fragments. (Highlight call itself is
			// still commented out below.)
			SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter(prefixHTML, suffixHTML);
			Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query));
			highlighter.setTextFragmenter(new SimpleFragmenter(50));
			// String emailAttach = highlighter.getBestFragment(analyzer, "emailAffix", doc.get("emailAttach"));

			// Fetch (effectively) all hits, then slice out the requested page.
			TopDocs results = searcher.search(query, 999999999);
			ScoreDoc[] hits = results.scoreDocs;
			System.out.println("命中数:" + results.totalHits);

			int start = (pageNo - 1) * pageSize;
			int end = Math.min(start + pageSize, hits.length);
			for (int i = start; i < end; i++) {
				doc = searcher.doc(hits[i].doc);
				if (null != doc) {
					searchlist.add((Map<String, Object>) indexModel.getConvertModel().DocConvertMap(doc));
				}
			}
			int totalnum = hits.length;

		} catch (Exception e) {
			// BUG FIX: was an empty catch that hid every failure.
			log.error("searchTalent failed", e);
		}
	}

}
