/**
 * <p>Copyright (c) 2011 深圳市鹏途信息技术有限公司 </p>
 * <p>				   All rights reserved. 		     </p>
 *
 * <p>项目名称 ： 	东莞交通工程质量监督综合业务系统        </p>
 * <p>创建者   :	libitum
 *
 * <p>描   述  :   ConveyDao.java for com.pengtu.dao.work    </p>
 *
 * <p>最后修改 : $: 2011-7-23-下午05:04:37 v 1.0.0	 libitum   $     </p>
 *
*/

package com.pengtu.dao.admin;

import java.io.File;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.DateTools.Resolution;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.springframework.stereotype.Repository;
import org.wltea.analyzer.lucene.IKAnalyzer;
import org.wltea.analyzer.lucene.IKQueryParser;
import org.wltea.analyzer.lucene.IKSimilarity;

import com.pengtu.dao.hibernate.HibernateDao;
import com.pengtu.entity.admin.Convey;
import com.pengtu.model.CoveryModel;
import com.pengtu.utils.DateUtils;

/**
 *
 * ConveyDao
 *
 * 2011-7-23 下午05:04:37
 *
 * @version 1.0.0
 *
 */
@Repository
public class ConveyDao extends HibernateDao<Convey, String> {

	public void createIndexByLucene(String index) {
		try {
			File fsDir = new File(index+"//convey");
			Directory dir = FSDirectory.open(fsDir);
			Analyzer analyzer = new IKAnalyzer();
			IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_31,
					analyzer);
			iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);// 总是重新创建
			if (IndexWriter.isLocked(dir)) {
				IndexWriter.unlock(dir);
				}
			IndexWriter iw = new IndexWriter(dir, iwc);
			List<Convey> conveyList = super.getAll();
			int size = conveyList.size();
			long startTime = new Date().getTime();
			
			//添加到索引里去
			Document doc;
			for (Convey convey : conveyList) {
				doc = new Document();
				doc.add(new Field("id", convey.getId(), Field.Store.YES,
						Field.Index.ANALYZED));
				doc.add(new Field("title", convey.getTitle(), Field.Store.YES,
						Field.Index.ANALYZED));
				doc.add(new Field("content", convey.getContent(),
						Field.Store.YES, Field.Index.ANALYZED));
//				doc.add(new Field("keywords", convey.getKeywords(),
//						Field.Store.YES, Field.Index.ANALYZED));
				
				doc.add(new Field("createDate", DateTools.dateToString(
						convey.getCreateDate(), Resolution.MINUTE),
							Field.Store.YES, Field.Index.NOT_ANALYZED));
				iw.addDocument(doc);
			}
			// 自动优化合并索引文件
			iw.optimize();
			iw.close();
			long endTime = new Date().getTime();
			System.out.println("一共" + size + ",这花费了" + (endTime - startTime)
					+ " 毫秒来把Convey文档增加到索引里面去!");
		} catch (Exception e) {
			e.printStackTrace();
		}
		
	}

	public List<CoveryModel> SearchByLucene(String index, String keyword,
			String startDate, String endDate) {
		File fsDir = new File(index+"//convey");
		Analyzer analyzer = new IKAnalyzer();
		List<CoveryModel> coveryModelList = new ArrayList<CoveryModel>();
		try {
			// 索引查询
			IndexReader reader = IndexReader
					.open(FSDirectory.open(fsDir), true); // only searching, so
															// read-only=true
			IndexSearcher isearcher = new IndexSearcher(reader);

			BooleanQuery booleanQuery = new BooleanQuery();

			Query query1 = IKQueryParser.parse("title", keyword);// 分析检索词
			query1.setBoost(1.5f);
			booleanQuery.add(query1, Occur.SHOULD);

			Query query2 = IKQueryParser.parse("content", keyword);// 分析检索词
			query2.setBoost(1.0f);
			booleanQuery.add(query2, Occur.SHOULD);

//			Query query3 = IKQueryParser.parse("keywords", keyword);// 分析检索词
//			query3.setBoost(1.0f);
//			booleanQuery.add(query3, Occur.SHOULD);

			BooleanQuery filterBooleanQuery = new BooleanQuery();
			TermRangeQuery rangeQuery = new TermRangeQuery("createDate",startDate, endDate, true, true);
			filterBooleanQuery.add(rangeQuery, BooleanClause.Occur.MUST);

			// 将booleanQuery封装到Filter中
			Filter filter = new CachingWrapperFilter(new QueryWrapperFilter(
					filterBooleanQuery));

			TopScoreDocCollector collector = TopScoreDocCollector.create(100,
					true);
			//设置相识度
			isearcher.setSimilarity(new IKSimilarity());
			isearcher.search(booleanQuery, filter, collector);

			ScoreDoc[] hits = collector.topDocs(0, 100).scoreDocs;
			QueryScorer qs1 = new QueryScorer(query1);
			QueryScorer qs2 = new QueryScorer(query2);
//			QueryScorer qs3 = new QueryScorer(query3);
			for (ScoreDoc h : hits) {
				CoveryModel coveryModel = new CoveryModel();
				Document d = isearcher.doc(h.doc);
				String title = d.get("title");
				String content = d.get("content");
//				String keywords = d.get("keywords");
				//高亮效果
				Formatter formatter = new SimpleHTMLFormatter("<font style='background-color:#F9F400;'>", "</font>");
				Highlighter h1 = new Highlighter(formatter, qs1);
				Highlighter h2 = new Highlighter(formatter, qs2);
//				Highlighter h3 = new Highlighter(formatter, qs3);
				String title1 = h1.getBestFragment(analyzer, "title", title);
				String content1 = h2.getBestFragment(analyzer,"content", content);
//				String keywords1 = h3.getBestFragment(analyzer, "keywords",keywords);
				coveryModel.setId(d.get("id"));
				if (title1 != null) {
					coveryModel.setTitle(title1);
				} else {
					coveryModel.setTitle(title);
				}
				if (content1 != null) {
					//转化html代码
					content1 = com.pengtu.utils.StringUtils.changeHTML(content1);
					//将font包含的文当转化成html代码
					content1 = content1
							.replace("&lt;font&nbsp;", "<font ")
							.replace("'&gt;", "'>")
							.replace("&lt;/font", "</font")
							.replace("font&gt;", "font>");
					coveryModel.setContent(content1);
				} else {
					content = com.pengtu.utils.StringUtils.changeHTML(content);
					if(content.length()>100){ content = content.substring(0, 100);}
					coveryModel.setContent(content);
				}
//				if(keywords1 != null){
//					coveryModel.setKeywords(keywords1);
//				}else{
//					coveryModel.setKeywords(keywords);
//				}
				coveryModel.setCreateDate(DateUtils.toDate(d.get("createDate")));
				coveryModelList.add(coveryModel);
			}
			System.out.println("在发文covery中找到：" + hits.length + " 个");
			isearcher.close();

		} catch (Exception e) {
			e.printStackTrace();
		}
		return coveryModelList;
	}

}
