package com.ossean.match.lucene;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;

import javax.annotation.Resource;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.ossean.match.dao.ProjectDao;
import com.ossean.match.utils.Normalizer;

public class LuceneSearch {
	@Resource
	private static ProjectDao projectDao;
	private static Logger logger = LoggerFactory.getLogger(LuceneSearch.class);
	/**
	 * memos match to projects by Lucene
	 * 
	 * @param keyWords
	 * @param searchField
	 * @param weight
	 * @param matchMap
	 * @param memoHistory
	 * @param indexReader
	 * @return
	 */
	public static HashMap<Integer, Double> memoToPrjMatchByLucene(String keyWords,
			List<String> keyWordsList, String searchField, double weight,
			HashMap<Integer, Double> matchMap, IndexReader prjIndexReader) {
		try {
			IndexSearcher is = new IndexSearcher(prjIndexReader);
			BooleanQuery query = new BooleanQuery();
			Similarity similarity = new DefaultSimilarity(){
				@Override
				public float coord(int overlap, int maxOverlap) {
					return overlap*overlap*overlap / (float)maxOverlap;
				}
				@Override
				public float queryNorm(float sumOfSquaredWeights) {
				  return 1.0f;
			    }  
			};
			is.setSimilarity(similarity);
			
			for(String keyWordsTerm : keyWordsList){
				Term term = new Term(searchField, keyWordsTerm);
				TermQuery tq = new TermQuery(term);
				if(keyWordsTerm.getBytes().length != keyWordsTerm.length()){ //汉字设boost 0.002
					tq.setBoost(0.02f);
				}
				else {
					double curTermDocFreq = prjIndexReader.docFreq(term);
					if (keyWordsList.size() > 1 && curTermDocFreq > 10) {
						if (curTermDocFreq <= 100) {
							tq.setBoost((float) (1/(curTermDocFreq/10)));
						}
						else if(curTermDocFreq > 100 && curTermDocFreq <= 400){
							tq.setBoost(0.09f);
						}
						else {
							tq.setBoost(0.08f);
						}
						//tq.setBoost((float) (1/(curTermDocFreq/10)));
					}
					else {
						tq.setBoost(1.1f);
					}
				}
				query.add(tq, BooleanClause.Occur.SHOULD);
			}
			TopDocs td = is.search(query, 10000);  
			ScoreDoc[] sds = td.scoreDocs;
			for (ScoreDoc sd : sds) {
				Document d = is.doc(sd.doc);
				String prjId = d.get(LuceneIndex.prjIdFieldName);
				String[] prjNames = d.getValues(searchField);
				for(String prjName : prjNames){
					if (keyWords.contains(prjName)) {
						int pId = Integer.parseInt(prjId);
						if (matchMap.containsKey(pId)) {
							matchMap.put(pId, matchMap.get(pId) + weight + sd.score/1000);
						} else
							matchMap.put(pId, weight + sd.score/1000);
					}
				}	
			}
		} catch (IOException e) {
			logger.error("memoToPrjMatchByLucene IOException: " + e);
		} 
		return matchMap;
	}
	

	/**
	 * projects match to memos by Lucene
	 * 
	 * @param prjName
	 * @param projId
	 * @param searchFiled
	 * @param weight
	 * @param map
	 * @param memoHistory
	 * @param indexReader
	 * @return
	 */
	public static HashMap<Integer, Double> prjToMemoMatchByLucene(
			String prjName, String searchField, double weight,
			HashMap<Integer, Double> map, IndexReader memoIndexReader) {

		try {
			IndexSearcher is = new IndexSearcher(memoIndexReader);
			List<String> prjNameList = Normalizer.getList(prjName);
			BooleanQuery query = new BooleanQuery();
			
			for(String prjNameTerm : prjNameList){
				Term term = new Term(searchField, prjNameTerm);
				TermQuery tq = new TermQuery(term);
				query.add(tq, BooleanClause.Occur.MUST);  //项目名分词后的每个term都必须在帖子中出现
			}
			TopDocs td = is.search(query, 1000000);
			ScoreDoc[] sds = td.scoreDocs;
			for (ScoreDoc sd : sds) {
				Document d = is.doc(sd.doc);
				String postId = d.get(LuceneIndex.memoIdFieldName);
				int pId = Integer.parseInt(postId);
				if (map.containsKey(pId)) {
					map.put(pId, map.get(pId) + weight + sd.score/1000);
				} else
					map.put(pId, weight + sd.score/1000);	
			}
		} catch (IOException e) {
			logger.error("prjToMemoMatchByLucene IOException: " + e);
		}
		return map;
	}
	
	/**
	 * 项目名和项目别名与帖子标签之间的匹配
	 * @param tagStr
	 * @param searchField
	 * @param weight
	 * @param map
	 * @param indexReader
	 * @return
	 */
	public  static HashMap<Integer, Double> searchMemoTags(String tagStr, String searchField, String idField, double weight,
			HashMap<Integer, Double> map, IndexReader indexReader) {

			try {
				IndexSearcher is = new IndexSearcher(indexReader);
				Term t = new Term(searchField, tagStr);
				Query query = new TermQuery(t);
				TopDocs td = is.search(query, 100000);
				ScoreDoc[] sds = td.scoreDocs;
				for (ScoreDoc sd : sds) {
					Document d = is.doc(sd.doc);
					String postId = d.get(idField);
					int pId = Integer.parseInt(postId);
					if (map.containsKey(pId)) {     
						map.put(pId, map.get(pId) + weight);
					} else {      
						map.put(pId, weight);
					}	
				}
			} catch (IOException e) {
				logger.error("searchMemoTags IOException: " + e);
			}
			return map;
	}
	
	/**
	 * 项目标签与帖子标签
	 * @param tagStr
	 * @param searchField
	 * @param idField
	 * @param weight
	 * @param map
	 * @param indexReader
	 * @return
	 */
	public  static HashMap<Integer, Double> searchByPrjTag(String tagStr, String searchField, String idField, double weight,
			HashMap<Integer, Double> map, IndexReader indexReader) {

			try {
				IndexSearcher is = new IndexSearcher(indexReader);
				Term t = new Term(searchField, tagStr);
				Query query = new TermQuery(t);
				TopDocs td = is.search(query, 100000);
				ScoreDoc[] sds = td.scoreDocs;
				for (ScoreDoc sd : sds) {
					Document d = is.doc(sd.doc);
					String postId = d.get(idField);
					int pId = Integer.parseInt(postId);
					if (map.containsKey(pId)) {    //有项目名和项目别名的匹配时才加入标签匹配的结果
						map.put(pId, map.get(pId) + weight);
					}	
				}
			} catch (IOException e) {
				logger.error("prjToMemoMatchByLucene IOException: " + e);
			}
			return map;
	}
	
	/**
	 * 项目标签搜索帖子标题
	 * @param tagStr
	 * @param searchField
	 * @param idField
	 * @param weight
	 * @param map
	 * @param indexReader
	 * @return
	 */
	public static  HashMap<Integer, Double> searchByPrjTagInMemoTitle(String tagStr, String searchField, String idField, double weight,
			HashMap<Integer, Double> map, IndexReader indexReader) {

			try {
				IndexSearcher is = new IndexSearcher(indexReader);
				List<String> tagNameList = Normalizer.getList(tagStr);
				BooleanQuery query = new BooleanQuery();
				for(String tagNameTerm : tagNameList){
					Term t = new Term(searchField, tagNameTerm);
					TermQuery tq = new TermQuery(t);
					query.add(tq, BooleanClause.Occur.MUST);
				}
				TopDocs td = is.search(query, 100000);
				ScoreDoc[] sds = td.scoreDocs;
				for (ScoreDoc sd : sds) {
					Document d = is.doc(sd.doc);
					String postId = d.get(idField);
					int pId = Integer.parseInt(postId);
					if (map.containsKey(pId)) {
						map.put(pId, map.get(pId) + weight);
					}	
				}
			} catch (IOException e) {
				logger.error("prjToMemoMatchByLucene IOException: " + e);
			}
			return map;
	}


	// 获得每个帖子匹配到的标签个数
//	public static HashMap<Integer, Integer> tagsMatch(String idField,
//			String tags, String searchField, IndexReader indexReader) {
//		HashMap<Integer, Integer> tagsMatchNum = new HashMap<Integer, Integer>();
//		try {
//			IndexSearcher is = new IndexSearcher(indexReader);
//			QueryParser parser = new QueryParser(searchField, new IKAnalyzer(true));
//			Query query = parser.parse(tags);
//			TopDocs td = is.search(query, 100000);
//			ScoreDoc[] sds = td.scoreDocs;
//			for (ScoreDoc sd : sds) {
//					Document d = is.doc(sd.doc);
//					String postId = d.get(idField);
//					int pId = Integer.parseInt(postId);
//					Explanation explanation = is.explain(query, sd.doc);
//					int hitNum = getHitTermsNum(explanation);
//					if (tagsMatchNum.containsKey(pId)) {
//						tagsMatchNum.put(pId, tagsMatchNum.get(pId) + hitNum);
//					} else
//						tagsMatchNum.put(pId, hitNum);
//				}
//			} catch (IOException e) {
//				logger.error("tagsMatch IOException: " + e);
//			} catch (ParseException e) {
//				logger.error("tagsMatch ParseException: " + e);
//			}
//
//		return tagsMatchNum;
//	}
	
	//get the number of terms hitted in docs
	public static int getHitTermsNum(Explanation explanation){
		int num = 0;
		String coord = explanation.getDetails()[(explanation.getDetails().length - 1)].toString();
		if (coord.length() > 20 || coord.length() <= 0) {
			num = 1;
		}
		else {
			String numStr = coord.substring(coord.indexOf('(')+1, coord.indexOf('/'));
			num = Integer.parseInt(numStr);
		}
		return num;
	}
	
}
