package com.zhenyi.search;

import java.io.IOException;
import java.io.StringReader;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BooleanQuery.Builder;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.cjw.model.DBMap;
import com.jfinal.kit.StrKit;

/**
 * Thin wrapper around a filesystem-backed Lucene index: documents are added with
 * an id/title/type/content plus arbitrary extra fields, and searched with
 * multi-field queries whose matches are HTML-highlighted.
 *
 * <p>Not thread-safe with respect to concurrent writers; Lucene itself allows at
 * most one open {@link IndexWriter} per index directory.
 */
public class SearchUtilsPro {

	/** Filesystem location of the Lucene index. */
	private final Path path;
	/** IK (Chinese-aware) analyzer, shared by indexing, query parsing and highlighting. */
	private final Analyzer analyzer = new IKAnalyzer();
	/** Markup wrapped around matched terms in highlighted fragments. */
	private final String highlighterPrefix = "<span style='color:red;'>";
	private final String highlighterSuffix = "</span>";

	/**
	 * @param path directory on disk holding the index (created on first write)
	 */
	public SearchUtilsPro(String path) {
		this.path = Paths.get(path);
	}

	/**
	 * Deletes every indexed document whose {@code id} field matches the given id.
	 *
	 * @param id document id as stored by {@link #add}
	 * @throws IOException    if the index cannot be opened or written
	 * @throws ParseException if the id cannot be parsed into a query
	 */
	public void remove(Long id) throws IOException, ParseException {
		// try-with-resources: the original leaked the Directory, and leaked the
		// writer too if parse() threw before close() was reached.
		try (Directory directory = FSDirectory.open(path);
				IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))) {
			QueryParser parser = new QueryParser("id", analyzer);
			indexWriter.deleteDocuments(parser.parse(String.valueOf(id)));
		}
	}

	/** Searches the default fields ("content", "title"); first page of up to 100 results. */
	public HashMap<String, Object> search(String text)
			throws IOException, ParseException, InvalidTokenOffsetsException {
		return search(text, null, null, 1, 100);
	}

	/** Searches the default fields with explicit paging. */
	public HashMap<String, Object> search(String text, int pageIndex, int pageSize)
			throws IOException, ParseException, InvalidTokenOffsetsException {
		return search(text, null, null, pageIndex, pageSize);
	}

	/** Searches the default fields, restricted to one document type, with paging. */
	public HashMap<String, Object> search(String text, Integer type, int pageIndex, int pageSize)
			throws IOException, ParseException, InvalidTokenOffsetsException {
		return search(text, type, null, pageIndex, pageSize);
	}

	/** Searches the given fields; first page of up to 100 results. */
	public HashMap<String, Object> search(String text, String[] fields)
			throws IOException, ParseException, InvalidTokenOffsetsException {
		return search(text, null, fields, 1, 100);
	}

	/** Searches the given fields with explicit paging. */
	public HashMap<String, Object> search(String text, String[] fields, int pageIndex, int pageSize)
			throws IOException, ParseException, InvalidTokenOffsetsException {
		return search(text, null, fields, pageIndex, pageSize);
	}

	/**
	 * Runs a multi-field query and returns one page of highlighted results.
	 *
	 * @param text      user query text (parsed by {@link MultiFieldQueryParser})
	 * @param type      optional document type filter; {@code null} means all types
	 * @param fields    fields to query; {@code null} defaults to {"content","title"}
	 * @param pageIndex 1-based page number (values below 1 are clamped to 1)
	 * @param pageSize  results per page (values below 1 are clamped to 1)
	 * @return map with keys "pageIndex", "pageSize", "totalPage" and "results"
	 *         (an {@code ArrayList<SearchResult>})
	 * @throws IOException                  on index access failure
	 * @throws ParseException               if {@code text} is not a valid query
	 * @throws InvalidTokenOffsetsException if highlighting fails on a stored value
	 */
	public HashMap<String, Object> search(String text, Integer type, String[] fields, int pageIndex, int pageSize)
			throws IOException, ParseException, InvalidTokenOffsetsException {

		// Clamp paging inputs: pageSize < 1 previously caused an ArithmeticException
		// in the totalPage computation, and pageIndex < 1 a negative start offset.
		if (pageIndex < 1) {
			pageIndex = 1;
		}
		if (pageSize < 1) {
			pageSize = 1;
		}

		// try-with-resources: the original never closed the reader or directory,
		// leaking a file handle per search.
		try (Directory directory = FSDirectory.open(path);
				DirectoryReader ireader = DirectoryReader.open(directory)) {
			IndexSearcher isearcher = new IndexSearcher(ireader);
			if (fields == null) {
				fields = new String[] { "content", "title" };
			}

			// Query all requested fields at once.
			MultiFieldQueryParser parserMuti = new MultiFieldQueryParser(fields, analyzer);
			Query textQuery = parserMuti.parse(text);

			Builder builder = new BooleanQuery.Builder().add(new BooleanClause(textQuery, Occur.MUST));
			// Optional type filter, AND-ed with the text query.
			if (type != null) {
				Query typeQuery = parserMuti.createBooleanQuery("type", String.valueOf(type), Occur.MUST);
				builder.add(new BooleanClause(typeQuery, Occur.MUST));
			}
			BooleanQuery query = builder.build();

			QueryScorer scorer = new QueryScorer(query);
			SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(highlighterPrefix, highlighterSuffix);
			Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
			Highlighter highlighter = new Highlighter(formatter, scorer);
			highlighter.setTextFragmenter(fragmenter);

			// Fetch at least enough hits to cover the requested page. The original
			// hard-coded 100, silently returning empty pages past the first 100 hits
			// while still reporting a larger totalPage.
			int limit = Math.max(100, pageIndex * pageSize);
			ScoreDoc[] hits = isearcher.search(query, limit).scoreDocs;

			int start = (pageIndex - 1) * pageSize;
			int end = Math.min(pageIndex * pageSize, hits.length);
			// Ceiling division; equivalent to the original modulo-based computation.
			int totalPage = (hits.length + pageSize - 1) / pageSize;

			ArrayList<SearchResult> searchResults = new ArrayList<>();
			for (int i = start; i < end; i++) {
				Document hitDoc = isearcher.doc(hits[i].doc);
				searchResults.add(buildResult(hitDoc, fields, highlighter));
			}

			HashMap<String, Object> map = new HashMap<>();
			map.put("pageIndex", pageIndex);
			map.put("pageSize", pageSize);
			map.put("totalPage", totalPage);
			map.put("results", searchResults);
			return map;
		}
	}

	/**
	 * Converts one Lucene hit into a SearchResult, attaching highlighted
	 * fragments for the title/content fields and raw values for the rest.
	 */
	private SearchResult buildResult(Document hitDoc, String[] fields, Highlighter highlighter)
			throws IOException, InvalidTokenOffsetsException {
		SearchResult result = new SearchResult();
		DBMap contents = new DBMap();
		result.setId(Long.parseLong(hitDoc.get("id")));
		result.setType(Integer.parseInt(hitDoc.get("type")));
		String title = hitDoc.get("title");
		if (title == null) {
			// Documents indexed by older builds stored the title only under "name"
			// (see add()); fall back so their titles still surface.
			title = hitDoc.get("name");
		}
		result.setTitle(title);
		for (String key : fields) {
			if ("id".equals(key)) {
				continue;
			}
			String value = hitDoc.get(key);
			if (StrKit.notBlank(value)) {
				TokenStream ts = analyzer.tokenStream(key, new StringReader(value));
				String fragment = highlighter.getBestFragment(ts, value);
				if (StrKit.notBlank(fragment)) {
					if ("title".equals(key)) {
						result.setHighlightTitle(fragment);
						continue;
					}
					if ("content".equals(key)) {
						result.setHighlight(fragment);
						continue;
					}
				}
				// Non-title/content fields (or fields with no match) are returned raw.
				contents.put(key, value);
			}
		}
		result.setContents(contents);
		if (result.getHighlightTitle() == null) {
			result.setHighlightTitle(result.getTitle());
		}
		return result;
	}

	/**
	 * Adds one document to the index.
	 *
	 * @param id      unique document id (also used by {@link #remove})
	 * @param title   document title
	 * @param type    document type; {@code null} defaults to 0
	 * @param content optional body text; HTML tags are stripped before indexing
	 * @param map     optional extra fields to index verbatim
	 * @throws IOException if the index cannot be opened or written
	 */
	public void add(Long id, String title, Integer type, String content, HashMap<String, String> map)
			throws IOException {
		if (type == null) {
			type = 0;
		}
		// try-with-resources: the original leaked directory and writer on any exception.
		try (Directory directory = FSDirectory.open(path);
				IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))) {
			Document doc = new Document();
			// The 2018-08-13 edit stored the title only under "name", but search()
			// defaults to {"content","title"} and reads "title" — titles were neither
			// searched nor returned. Index under both names: "name" kept for
			// backward compatibility, "title" so searching/highlighting works.
			doc.add(new TextField("name", title, Field.Store.YES));
			doc.add(new TextField("title", title, Field.Store.YES));
			if (content != null && !content.isEmpty()) {
				doc.add(new TextField("content", delHtmlTag(content), Field.Store.YES));
			}
			doc.add(new TextField("id", String.valueOf(id), Field.Store.YES));
			doc.add(new TextField("type", String.valueOf(type), Field.Store.YES));
			// NOTE(review): Date.toString() is locale-dependent and not sortable;
			// kept for compatibility with existing index entries — consider storing
			// an ISO-8601 instant instead.
			doc.add(new TextField("addTime", String.valueOf(new Date()), Field.Store.YES));

			if (map != null) {
				for (Map.Entry<String, String> entry : map.entrySet()) {
					doc.add(new TextField(entry.getKey(), entry.getValue(), Field.Store.YES));
				}
			}
			indexWriter.addDocument(doc);
		}
	}

	/**
	 * Strips HTML tags from a string before indexing. The original pattern
	 * {@code <[.[^>]]*>} was a character-class union equivalent to the
	 * conventional {@code <[^>]*>} used here.
	 */
	private String delHtmlTag(String str) {
		if (str == null) {
			return "";
		}
		return str.replaceAll("<[^>]*>", "");
	}
}
