package com.kfpanda.search.biz.searchmg;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.springframework.stereotype.Service;
import org.wltea.analyzer.lucene.IKAnalyzer;
import org.wltea.analyzer.lucene.IKQueryParser;

import com.kfpanda.search.entity.SearchField;
import com.kfpanda.search.index.NormalIndexReader;
import com.util.common.page.PageImp;

@Service
public class SearchBIZ {

	private final Logger logger = Logger.getLogger(SearchBIZ.class);

	/** Maximum hits cap; currently unused — kept for future result-size limiting. */
	private static final int TOP_NUM = 100;

	/** Index fields the multi-field query runs against. */
	private static final String[] SEARCH_FIELDS = {"intro", "fileName", "lastModified", "contents"};

	/**
	 * Runs a paged, multi-field full-text search against the index located at
	 * {@code searchField.getIndexPath()} and returns the documents for the
	 * requested page. Matched terms in the "intro" and "fileName" fields are
	 * highlighted and written to the debug log.
	 *
	 * @param searchField carries the index path, an optional norms field and the query string
	 * @param paging      page offset ({@code getCurrentPage()}) and page size
	 * @return the matched documents for the requested page; an empty list on any failure
	 */
	public List<Document> fileSearch(SearchField searchField, PageImp paging) {
		long start = System.currentTimeMillis();
		List<Document> docList = new ArrayList<Document>();

		IndexReader reader = null;
		Searcher searcher = null;
		try {
			// Open read-only: this method only searches, never writes.
			reader = IndexReader.open(
					FSDirectory.open(new File(searchField.getIndexPath())), true);
			if (searchField.getNormsField() != null) {
				reader = new NormalIndexReader(reader, searchField.getNormsField());
			}
			searcher = new IndexSearcher(reader);

			Analyzer analyzer = new IKAnalyzer();
			Query query = IKQueryParser.parseMultiField(SEARCH_FIELDS, searchField.getQueries());

			// Paged search: collect enough top hits to cover the requested page,
			// then slice out [currentPage, currentPage + pageSize).
			TopScoreDocCollector collector = TopScoreDocCollector.create(
					paging.getCurrentPage() + paging.getPageSize(), false);
			searcher.search(query, collector);
			TopDocs topDocs = collector.topDocs(paging.getCurrentPage(), paging.getPageSize());

			// Wrap matched terms in a red <span> for display.
			SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(
					"<span style='color:#ff0000'>", "</span>");
			Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));

			for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
				Document doc = searcher.doc(scoreDoc.doc);
				logHighlight(highlighter, analyzer, doc.get("intro"));
				logHighlight(highlighter, analyzer, doc.get("fileName"));
				docList.add(doc);
			}
		} catch (CorruptIndexException e) {
			logger.error("查询出错：", e);
		} catch (IOException e) {
			logger.error("查询出错：", e);
		} catch (InvalidTokenOffsetsException e) {
			logger.error("查询出错：", e);
		} finally {
			// Always release index resources, even when the search failed.
			closeQuietly(searcher, reader);
		}

		long end = System.currentTimeMillis();
		logger.info("Found " + docList.size() + " document(s) (in " + (end - start)
				+ " milliseconds) that matched query '" + searchField.getQueries() + "'");
		return docList;
	}

	/**
	 * Computes the highlighted fragment for one field value and logs it at debug
	 * level. No-op when the field is absent ({@code null}) or empty.
	 */
	private void logHighlight(Highlighter highlighter, Analyzer analyzer, String text)
			throws IOException, InvalidTokenOffsetsException {
		if (text == null || text.length() == 0) {
			return; // field not stored on this document
		}
		// Fragment size = full text length so the whole field value is returned highlighted.
		highlighter.setTextFragmenter(new SimpleFragmenter(text.length()));
		TokenStream tokenStream = analyzer.tokenStream("", new StringReader(text));
		logger.debug("高亮设置: " + highlighter.getBestFragment(tokenStream, text));
	}

	/**
	 * Closes the searcher and reader, logging (not rethrowing) close failures —
	 * by this point the result list is already built and should still be returned.
	 */
	private void closeQuietly(Searcher searcher, IndexReader reader) {
		if (searcher != null) {
			try {
				searcher.close();
			} catch (IOException e) {
				logger.warn("close searcher failed", e);
			}
		}
		if (reader != null) {
			try {
				reader.close();
			} catch (IOException e) {
				logger.warn("close reader failed", e);
			}
		}
	}
}
