package com.iwords.service.lucene;

import com.iwords.service.IWordsSearchService;
import com.iwords.service.IWordsSearchServiceLocal;
import com.iwords.service.object.ExamItemsEntity;
import com.iwords.service.object.ExamSentencesEntity;
import com.iwords.service.object.SysLibWordsEntity;
import com.iwords.service.persistence.SysLibWordsPersistence;
import com.oasis.iwords.lucene.analysis.PorterAnalyzer;
import com.oasis.iwords.lucene.annotation.processor.IndexSeacheAnnotationProcessor;
import com.oasis.iwords.lucene.annotation.processor.IndexSearchClassMetadata.IndexSearchElement;
import com.oasis.iwords.lucene.conversion.DefaultTypeConverter;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.search.highlight.*;
import org.apache.lucene.util.Version;

import javax.ejb.EJB;
import javax.ejb.Stateless;

import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Lucene-backed implementation of the word search service: finds example
 * sentences and exam items that contain a given word, optionally wrapping
 * matched terms in HTML highlight tags.
 *
 * <p>Originally created by qianxiang on 2011-05-01.</p>
 */
@Stateless
public class IWordsSearchServiceImpl implements IWordsSearchService, IWordsSearchServiceLocal {

	private static final Logger LOG = Logger.getLogger(IWordsSearchServiceImpl.class.getName());

	/** Tags wrapped around matched terms when highlighting is enabled. */
	private static final String HIGHLIGHT_PRE = "<font color='red'>";
	private static final String HIGHLIGHT_POST = "</font>";

	/** Approximate fragment length (chars) used to size the number of highlight fragments. */
	private static final int FRAGMENT_LENGTH = 100;

	@EJB
	private SysLibWordsPersistence sysLibWordsPersistence;

	/**
	 * Ad-hoc smoke test. NOTE(review): this runs outside the EJB container, so
	 * {@code sysLibWordsPersistence} is {@code null} here — only the String-based
	 * overloads (which never touch persistence) can be exercised this way.
	 */
	public static void main(String[] args) {
		IWordsSearchService searchService = new IWordsSearchServiceImpl();
		List<ExamSentencesEntity> sentenceList = searchService.searchSentences("brave", 0, 100);
		for (ExamSentencesEntity sentence : sentenceList) {
			System.out.println(sentence);
		}

		List<ExamItemsEntity> items = searchService.searchExamItems("brave", 0, 100);
		for (ExamItemsEntity item : items) {
			System.out.println(item);
		}
	}

	/**
	 * Maps a Lucene {@link Document} onto a new instance of {@code clazz} using the
	 * index-search annotation metadata: every annotated Java field is populated from
	 * its configured index field, converted to the field's declared type.
	 *
	 * @param doc   the Lucene hit to read values from
	 * @param clazz entity class to instantiate (must have a no-arg constructor)
	 * @return the populated entity, or {@code null} if reflection fails
	 */
	private Object putValue(Document doc, Class<?> clazz) {
		try {
			// clazz is already a loaded Class — the original Class.forName(clazz.getName())
			// round trip was redundant.
			Object obj = clazz.getDeclaredConstructor().newInstance();
			List<IndexSearchElement> elements = IndexSeacheAnnotationProcessor.getAnnotationProcessor().getIndexField(clazz);

			for (IndexSearchElement element : elements) {
				Field field = clazz.getDeclaredField(element.getField());
				if (!element.isAccessible()) {
					field.setAccessible(true);
				}
				field.set(obj, DefaultTypeConverter.getDefaultTypeConverter()
						.convertValue(doc.get(element.getIndexField()), field.getType()));
			}
			return obj;

		} catch (ReflectiveOperationException e) {
			LOG.log(Level.SEVERE, "Cannot map Lucene document to " + clazz.getName(), e);
		} catch (SecurityException e) {
			LOG.log(Level.SEVERE, "Cannot access fields of " + clazz.getName(), e);
		}
		return null;
	}

	/**
	 * Concatenates all scoring fragments the highlighter produces for {@code text}.
	 * Fragments with score 0 (no query term present) are skipped; contiguous
	 * fragments are merged by the highlighter itself (mergeContiguousFragments=true).
	 *
	 * @param text the raw field value to highlight; must be non-null
	 * @return the highlighted text, possibly empty if no fragment scored above 0
	 */
	private String buildHighlight(Highlighter highlighter, TokenStream tokenStream, String text)
			throws IOException, InvalidTokenOffsetsException {
		// text is split into length/FRAGMENT_LENGTH segments; ask for all of them.
		TextFragment[] fragments = highlighter.getBestTextFragments(tokenStream, text, true,
				text.length() / FRAGMENT_LENGTH + 1);
		StringBuilder sb = new StringBuilder();
		for (TextFragment fragment : fragments) {
			if (fragment != null && fragment.getScore() > 0) {
				sb.append(fragment.toString());
			}
		}
		return sb.toString();
	}

	/**
	 * Generic paged search over the supplied index.
	 *
	 * @param word       the term to search for
	 * @param start      zero-based offset of the first hit to return
	 * @param howMany    maximum number of hits to return
	 * @param field      index field to query (and, when highlighting, to rewrite)
	 * @param clazz      entity class each hit is mapped to via {@link #putValue}
	 * @param hightlight when {@code true}, the matched field value of each document is
	 *                   replaced by its highlighted fragments before mapping
	 *                   (parameter name typo kept for interface stability)
	 * @param searcher   the index to search; ownership stays with the caller
	 * @return mapped entities; empty on parse or I/O failure (errors are logged)
	 */
	public List search(String word, int start, int howMany, String field, Class<?> clazz, boolean hightlight, IndexSearcher searcher) {
		List list = new ArrayList();

		Analyzer analyzer = new PorterAnalyzer(new StandardAnalyzer(Version.LUCENE_31));
		QueryParser parser = new QueryParser(Version.LUCENE_31, field, analyzer);
		try {
			Query query = parser.parse(field + ":" + word);
			Highlighter highlighter = new Highlighter(
					new SimpleHTMLFormatter(HIGHLIGHT_PRE, HIGHLIGHT_POST), new QueryScorer(query));

			TopScoreDocCollector collector = TopScoreDocCollector.create(start + howMany, false);
			searcher.search(query, collector);
			for (ScoreDoc scoreDoc : collector.topDocs(start, howMany).scoreDocs) {
				Document doc = searcher.doc(scoreDoc.doc);
				if (hightlight) {
					String text = doc.get(field);
					// Guard: the original dereferenced doc.get(field) unconditionally and
					// threw an NPE for documents that lack the field.
					if (text != null) {
						try {
							// Passing the field lets Lucene reuse a stored term-vector when
							// present instead of re-analyzing the text.
							TokenStream tokenStream = TokenSources.getAnyTokenStream(
									searcher.getIndexReader(), scoreDoc.doc, field, analyzer);
							doc.getField(field).setValue(buildHighlight(highlighter, tokenStream, text));
						} catch (InvalidTokenOffsetsException e) {
							// Best effort: fall back to the un-highlighted value.
							LOG.log(Level.WARNING, "Highlighting failed for doc " + scoreDoc.doc, e);
						}
					}
				}
				list.add(putValue(doc, clazz));
			}
		} catch (ParseException e) {
			LOG.log(Level.WARNING, "Cannot parse query for word '" + word + "'", e);
		} catch (IOException e) {
			LOG.log(Level.SEVERE, "Index search failed for word '" + word + "'", e);
		}
		return list;
	}

	/**
	 * Sentence-specific search kept for backward compatibility. Unlike the generic
	 * {@link #search(String, int, int, String, Class, boolean, IndexSearcher)}, this
	 * builds each {@link ExamSentencesEntity} by hand from the "eid"/"sid"/"iid"
	 * index fields and always highlights the matched field.
	 */
	public List<ExamSentencesEntity> search(String word, int start, int howMany, String field) {
		List<ExamSentencesEntity> sentenceList = new ArrayList<ExamSentencesEntity>();
		IndexSearcher searcher = IndexerFactory.getFactory().getIndexSearcherSentence();
		Analyzer analyzer = new PorterAnalyzer(new StandardAnalyzer(Version.LUCENE_31));
		// Default field "sen" is irrelevant here because the query always spells
		// out "field:word" explicitly, but it is kept as in the original.
		QueryParser parser = new QueryParser(Version.LUCENE_31, "sen", analyzer);

		try {
			Query query = parser.parse(field + ":" + word);
			Highlighter highlighter = new Highlighter(
					new SimpleHTMLFormatter(HIGHLIGHT_PRE, HIGHLIGHT_POST), new QueryScorer(query));

			TopScoreDocCollector collector = TopScoreDocCollector.create(start + howMany, false);
			searcher.search(query, collector);
			// TODO(review): collector.topDocs().totalHits could drive pagination later.
			for (ScoreDoc scoreDoc : collector.topDocs(start, howMany).scoreDocs) {
				Document doc = searcher.doc(scoreDoc.doc);

				ExamSentencesEntity sentence = new ExamSentencesEntity();
				sentence.setExamid(Integer.valueOf(doc.get("eid")));
				sentence.setSentenceid(Long.valueOf(doc.get("sid")));
				sentence.setItemid(Integer.valueOf(doc.get("iid")));

				String text = doc.get(field);
				// Guard: the original dereferenced doc.get(field) unconditionally and
				// threw an NPE for documents that lack the field.
				if (text != null) {
					try {
						TokenStream tokenStream = TokenSources.getAnyTokenStream(
								searcher.getIndexReader(), scoreDoc.doc, field, analyzer);
						sentence.setSentence(buildHighlight(highlighter, tokenStream, text));
					} catch (InvalidTokenOffsetsException e) {
						// Best effort: leave the sentence text unset for this hit.
						LOG.log(Level.WARNING, "Highlighting failed for doc " + scoreDoc.doc, e);
					}
				}
				sentenceList.add(sentence);
			}

		} catch (ParseException e) {
			LOG.log(Level.WARNING, "Cannot parse query for word '" + word + "'", e);
		} catch (IOException e) {
			LOG.log(Level.SEVERE, "Index search failed for word '" + word + "'", e);
		}
		return sentenceList;
	}

	/**
	 * Looks up the word's spelling by id and searches the sentence index for it.
	 * NOTE(review): assumes {@code findWordByWordId} never returns {@code null}
	 * for a valid id — confirm against the persistence contract.
	 */
	public List<ExamSentencesEntity> searchSentences(Long wordid, int start, int howMany) {
		SysLibWordsEntity sysLibWordsEntity = sysLibWordsPersistence.findWordByWordId(wordid);
		return search(sysLibWordsEntity.getSpelling(), start, howMany, "sen",
				ExamSentencesEntity.class, true, IndexerFactory.getFactory().getIndexSearcherSentence());
	}

	/** Searches the sentence index ("sen" field) for the given word, with highlighting. */
	@Override
	public List<ExamSentencesEntity> searchSentences(String word, int start, int howMany) {
		return search(word, start, howMany, "sen",
				ExamSentencesEntity.class, true, IndexerFactory.getFactory().getIndexSearcherSentence());
	}

	/**
	 * Looks up the word's spelling by id and searches the exam-item index for it.
	 * NOTE(review): same null-return assumption as {@link #searchSentences(Long, int, int)}.
	 */
	@Override
	public List<ExamItemsEntity> searchExamItems(Long wordid, int start, int howMany) {
		SysLibWordsEntity sysLibWordsEntity = sysLibWordsPersistence.findWordByWordId(wordid);
		return search(sysLibWordsEntity.getSpelling(), start, howMany, "item",
				ExamItemsEntity.class, true, IndexerFactory.getFactory().getIndexSearcherItem());
	}

	/** Searches the exam-item index ("item" field) for the given word, with highlighting. */
	@Override
	public List<ExamItemsEntity> searchExamItems(String word, int start, int howMany) {
		return search(word, start, howMany, "item",
				ExamItemsEntity.class, true, IndexerFactory.getFactory().getIndexSearcherItem());
	}

}
