/*
 * Copyright (c) 2017,重庆阿莫比科技有限公司,All Rights Reserved.
 */
package com.wowocai.btchina.service.impl;

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.sphx.api.SphinxClient;
import org.sphx.api.SphinxException;
import org.sphx.api.SphinxMatch;
import org.sphx.api.SphinxResult;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import com.chenlb.mmseg4j.ComplexSeg;
import com.chenlb.mmseg4j.Dictionary;
import com.chenlb.mmseg4j.MMSeg;
import com.chenlb.mmseg4j.Seg;
import com.chenlb.mmseg4j.Word;
import com.wowocai.btchina.entity.PagerEntity;
import com.wowocai.btchina.entity.WebTorrentEntity;
import com.wowocai.btchina.entity.WebTorrentFileEntity;
import com.wowocai.btchina.mapper.TorrentMapper;
import com.wowocai.btchina.service.SearchEngineService;
import com.wowocai.btchina.utils.JacksonUtil;
import com.wowocai.btchina.utils.ip.ObjectUtil;

/**
 * Sphinx-backed implementation of the search engine service: full-text
 * torrent search with keyword highlighting and URL-friendly name formatting.
 * <br/>
 * Date: 2017-05-24 17:55:32 <br/>
 * 
 * @author 刘建成
 */
@Service
public class SphinxEngineServiceImpl implements SearchEngineService, InitializingBean {

	private final static Logger logger = LoggerFactory.getLogger(SphinxEngineServiceImpl.class);

	@Value("${sphinx.host}")
	private String host = "192.168.46.100";

	@Value("${sphinx.port}")
	private int port = 9312;

	@Value("${sphinx.index}")
	private String index = "sph_idx_spider_main";

	private SphinxClient sphinxClient = null;

	private ComplexSeg complex = null;

	@Autowired
	private TorrentMapper torrentMapper = null;

	/**
	 * Creates a new instance of SphinxEngineServiceImpl.
	 * <p>
	 * No-arg constructor used by Spring; the host/port/index settings and the
	 * mapper are injected via {@code @Value}/{@code @Autowired} afterwards.
	 */
	public SphinxEngineServiceImpl() {

	}

	/**
	 * Rewritten: data is now pushed from MySQL to Sphinx (instead of MongoDB
	 * to Elasticsearch), so this method is an intentional no-op.
	 * 
	 * @see com.wowocai.btchina.service.SearchEngineService#pushMongoToElasticsearch()
	 */
	@Override
	public void pushMongoToElasticsearch() {

		// Intentional no-op: the push is performed by a shell script invoking
		// the Sphinx indexer command.

	}

	/**
	 * Rewritten: the torrent VisitCount is synced into Sphinx, so this method
	 * is an intentional no-op.
	 * 
	 * @see com.wowocai.btchina.service.SearchEngineService#syncVisitCountToEsByDate()
	 */
	@Override
	public void syncVisitCountToEsByDate() {

		// Intentional no-op: the sync is performed by a shell script invoking
		// the Sphinx indexer command.

	}

	/**
	 * Looks up a single torrent row by its infohash in MySQL and converts it
	 * into a {@link WebTorrentEntity}.
	 * 
	 * @param infohash the torrent infohash (hex string)
	 * @return the converted entity, or {@code null} when the torrent does not
	 *         exist or the lookup/conversion fails
	 * @see com.wowocai.btchina.service.SearchEngineService#findTorrentByInfohash(java.lang.String)
	 */
	@Override
	public WebTorrentEntity findTorrentByInfohash(String infohash) {

		try {
			Map<String, Object> map = torrentMapper.findTorrentInfo(infohash);
			if (map == null) {
				return null;
			}
			return convertEntity(map);
		} catch (Exception e) {
			// Log through SLF4J instead of printStackTrace so the failure and
			// its cause show up in the application log.
			logger.error("failed to load torrent, infohash={}", infohash, e);
		}
		return null;
	}

	/**
	 * Converts a raw MySQL row (column name -> value) into a
	 * {@link WebTorrentEntity}, segmenting the torrent name into at most 20
	 * keywords as a best-effort extra.
	 * 
	 * @author 刘建成
	 * @param map raw row as returned by {@code TorrentMapper#findTorrentInfo}
	 * @return the populated entity, never {@code null}
	 */
	private WebTorrentEntity convertEntity(Map<String, Object> map) {

		WebTorrentEntity bean = new WebTorrentEntity();
		String name = (String) map.get("name");
		String infohash = (String) map.get("infohash");
		bean.setName(name);
		bean.set_id(infohash);
		bean.setInfohash(infohash);
		bean.setCreateDate((String) map.get("createdate"));
		bean.setCreateTime(convertDate(map.get("createtime")));
		bean.setHumanSize((String) map.get("hsize"));
		bean.setLastUpdate((String) map.get("lastupdate"));

		bean.setMagnet("magnet:?xt=urn:btih:" + infohash.toUpperCase());
		bean.setSize(ObjectUtil.getLong(map.get("size")));
		// Use ObjectUtil.getInt like the neighbouring fields instead of a raw
		// (int) cast, which would throw NPE/ClassCastException on a null or
		// non-Integer column value (assumes getInt null-tolerance matches its
		// use for "vcount" above - TODO confirm).
		bean.setVideo(ObjectUtil.getInt(map.get("isvideo")) == 1);
		bean.setVisitCount(ObjectUtil.getInt(map.get("vcount")));
		bean.setFiles(JacksonUtil.readValueList((String) map.get("files"), WebTorrentFileEntity.class));

		try {
			String words = segWords(name, " ");
			if (words != null) {
				String[] w = words.split("\\p{Space}");
				List<String> keys = Arrays.asList(w);
				keys = keys.size() > 20 ? keys.subList(0, 20) : keys;
				bean.setKeys(keys);
			}
		} catch (IOException e) {
			// Keyword extraction is best-effort; log instead of swallowing the
			// failure silently.
			logger.warn("failed to segment torrent name: {}", name, e);
		}
		return bean;
	}

	/**
	 * Lazily creates the shared mmseg4j complex segmenter.
	 * <p>
	 * Synchronized because this service is a Spring singleton hit by
	 * concurrent request threads: the original unsynchronized lazy init could
	 * race and build several segmenters (or publish one unsafely).
	 * 
	 * @return the shared {@link ComplexSeg} instance
	 */
	private synchronized ComplexSeg getComplexSeg() {
		if (complex == null) {
			complex = new ComplexSeg(Dictionary.getInstance());
		}
		return complex;
	}

	/**
	 * Segments the text read from {@code input} with the mmseg4j complex
	 * algorithm and joins the resulting tokens with {@code wordSpilt}.
	 * 
	 * @param input source text reader
	 * @param wordSpilt separator inserted between adjacent tokens
	 * @return the joined token string (empty when no token is produced)
	 * @throws IOException if reading from {@code input} fails
	 */
	public String segWords(Reader input, String wordSpilt) throws IOException {
		MMSeg tokenizer = new MMSeg(input, getComplexSeg());
		StringBuilder joined = new StringBuilder();
		boolean needSeparator = false;
		for (Word token = tokenizer.next(); token != null; token = tokenizer.next()) {
			if (needSeparator) {
				joined.append(wordSpilt);
			}
			joined.append(token.getString());
			needSeparator = true;
		}
		return joined.toString();
	}

	/**
	 * Convenience overload of {@link #segWords(Reader, String)} that segments
	 * a plain string.
	 * 
	 * @param txt text to segment
	 * @param wordSpilt separator inserted between adjacent tokens
	 * @return the joined token string
	 * @throws IOException propagated from the reader-based overload
	 */
	public String segWords(String txt, String wordSpilt) throws IOException {
		return segWords(new StringReader(txt), wordSpilt);
	}

	/**
	 * Converts a raw DB/Sphinx attribute value into a {@link Date}.
	 * <p>
	 * {@link Date} values pass through unchanged; numeric values are treated
	 * as Unix timestamps in seconds and scaled to milliseconds; anything else
	 * (including {@code null}) falls back to "now".
	 * 
	 * @param val raw value ({@code Date}, epoch-seconds number, or null)
	 * @return the converted date, never {@code null}
	 */
	private Date convertDate(Object val) {
		if (val instanceof Date) {
			return (Date) val;
		}
		// Generalized from separate Integer/Long branches: any Number is an
		// epoch-seconds timestamp; the multiplication is done in long
		// arithmetic (1000L) to avoid int overflow.
		if (val instanceof Number) {
			return new Date(((Number) val).longValue() * 1000L);
		}
		return new Date();
	}

	/**
	 * Intentional no-op: the new version no longer generates a sitemap.
	 * 
	 * @see com.wowocai.btchina.service.SearchEngineService#createSiteMap()
	 */
	@Override
	public void createSiteMap() {

		// Intentionally left empty.

	}

	// /**
	// * 简单字符串高亮
	// * <br/>
	// *
	// * @author 刘建成
	// * @param doc 完整文本
	// * @param before 高亮前缀
	// * @param after 高亮后缀
	// * @param keywords 关键词列表
	// * @param index 指定高亮关键词的索引值
	// * @param maxlength doc最大长度，超过截取，为0时不截取
	// * @param around 截取摘要后的结尾符号
	// * @return
	// */
	// public static String highlight(String doc, String before, String after,
	// String[] keywords, int maxlength,
	// String around) {
	// if (doc == null) {
	// return null;
	// }
	// if (keywords == null || keywords.length == 0) {
	// return doc;
	// }
	// if (around == null) {
	// around = "";
	// }
	// String doc_ = null;
	// int length = doc.length();
	// if (maxlength <= 0 || length < maxlength) {
	// doc_ = doc;
	// } else {
	// doc_ = doc.substring(0, maxlength) + around;
	// }
	//
	// List<String> strs = Arrays.asList(keywords);
	// strs.sort(new Comparator<String>() {
	//
	// @Override
	// public int compare(String o1, String o2) {
	//
	// int n1 = o1 == null ? -1 : 0;
	// if (n1 == -1) {
	// return n1;
	// }
	// int n2 = o2 == null ? -1 : 0;
	// if (n2 == -1) {
	// return 1;
	// }
	// return o2.length() - o1.length();
	// }
	// });
	//
	// List<int[]> res = indexOf(doc, strs.toArray(new String[] {}));
	// // 合并后的匹配记录
	// // System.out.println(JacksonUtil.toJSon(res));
	// if (res.size() == 0) {
	// return doc_;
	// }
	// return buidHighlight(doc_, res, before, after);
	// }

	/**
	 * Builds the highlighted text by wrapping every matched interval of
	 * {@code doc} with the {@code before}/{@code after} markers.
	 * 
	 * @author 刘建成
	 * @param doc the (possibly truncated) source text
	 * @param res sorted, non-overlapping match intervals as {start, end} pairs
	 * @param before opening highlight marker
	 * @param after closing highlight marker
	 * @return the highlighted text
	 */
	private String buidHighlight(String doc, List<int[]> res, String before, String after) {
		// StringBuilder instead of StringBuffer: a method-local buffer needs
		// no synchronization.
		StringBuilder sb = new StringBuilder();
		int last = 0;
		for (int i = 0; i < res.size(); i++) {
			int[] a = res.get(i);
			// Plain text before the first match.
			if (i == 0 && a[0] > 0) {
				sb.append(doc.substring(0, a[0]));
			}
			// Gap between the previous match and this one.
			if (last > 0 && last != a[0]) {
				sb.append(doc.substring(last, a[0]));
			}
			sb.append(before);
			sb.append(doc.substring(a[0], a[1]));
			sb.append(after);
			last = a[1];
		}
		// Trailing text after the last match.
		sb.append(doc.substring(last));

		return sb.toString();
	}

	/**
	 * Computes the KMP failure ("next") array for pattern {@code p}.
	 * <p>
	 * {@code next[j]} is the fallback position for a mismatch at {@code j},
	 * with the standard optimization: when {@code p[j] == p[next[j]]} the link
	 * is collapsed to {@code next[next[j]]} so the matcher never re-tests a
	 * character that is known to mismatch.
	 * 
	 * @param p the pattern characters
	 * @return the next array (empty for an empty pattern)
	 */
	protected int[] getNext(char[] p) {
		int pLen = p.length;
		int[] next = new int[pLen];
		// Guard: the original wrote next[0] unconditionally, which threw
		// ArrayIndexOutOfBoundsException for an empty pattern.
		if (pLen == 0) {
			return next;
		}
		int k = -1;
		int j = 0;
		next[0] = -1; // next[0] is -1 by definition
		// Derive next[j+1] from a known next[j] = k:
		// 1. if p[j] == p[k], then next[j+1] = k + 1;
		// 2. otherwise set k = next[k] and retry until k == -1 or p[j] == p[k].
		while (j < pLen - 1) {
			if (k == -1 || p[j] == p[k]) {
				k++;
				j++;
				if (p[j] != p[k]) {
					next[j] = k;
				} else {
					// p[j] == p[next[j]] would be re-tested uselessly on a
					// mismatch, so inherit the shorter fallback next[k].
					next[j] = next[k];
				}
			} else {
				k = next[k];
			}
		}
		return next;
	}

	// /**
	// *
	// * <br/>
	// *
	// * @author 刘建成
	// * @param source
	// * @param patterns 模式字符串数组，按字符串长度倒序排序
	// * @return
	// */
	// private static List<int[]> indexOf(String source, String[] patterns) {
	// char[] srcs = source.toCharArray();
	// int i = 0, j = 0;
	// int sLen = srcs.length;
	// int psLen = patterns.length;
	//
	// List<int[]> result = new ArrayList<int[]>();
	//
	// // 源字符串
	// int cousor = 0;
	// for (int k = 0; k < sLen; k++) {
	// // 模式数组
	// cousor = k;
	// for (int l = 0; l < psLen; l++) {
	// char[] ptn = patterns[l].toCharArray();
	// int pLen = ptn.length;
	//
	// for (int m = 0; m < pLen; m++) {
	// // 匹配成功
	// if (srcs[k] == ptn[m]) {
	// k++;
	// } else {
	// k = cousor;
	// break;
	// }
	// // 匹配到尾部
	// if (m == pLen - 1) {
	// int[] pos = { cousor, cousor + pLen };
	// result.add(pos);
	// }
	// }
	//
	// }
	// }
	// // 按开始坐标排序
	// result.sort(new Comparator<int[]>() {
	//
	// @Override
	// public int compare(int[] o1, int[] o2) {
	// int a = o1[0] - o2[0];
	// int b = o1[1] - o2[1];
	// // 前坐标相等的情况下，后坐标反向排序
	// return a == 0 ? 0 - b : a;
	// }
	// });
	//
	// // 原始匹配记录
	// // System.out.println(JacksonUtil.toJSon(result));
	// result = combine(result);
	// return result;
	// }

	/**
	 * Merges adjacent/overlapping highlight intervals into single intervals.
	 * Input elements are {start, end} pairs sorted by start offset; the method
	 * recurses until a full pass produces no further merges (detected via
	 * {@code size != count}).
	 * 
	 * @author 刘建成
	 * @param result sorted match intervals
	 * @return the merged interval list
	 */
	private List<int[]> combine(List<int[]> result) {
		List<int[]> res = new ArrayList<int[]>();
		int size = result.size();
		int count = 0;
		for (int k = 0; k <= size - 1; k++) {
			int[] a = result.get(k);
			// Last interval: nothing left to merge with.
			if (k == size - 1) {
				res.add(a);
				count++;
				continue;
			}
			int[] b = result.get(k + 1);
			int s1 = a[0], e1 = a[1];
			int s2 = b[0], e2 = b[1];
			// Right-extending overlap, e.g. [5,9]+[6,12] -> [5,12].
			if (s1 <= s2 && s2 <= e1 && e2 > e1) {
				int[] re = { s1, e2 };
				res.add(re);
				count++;
				k++;
				continue;
			}
			// Containment, e.g. [5,9]+[6,9] or [5,9]+[6,8] -> keep [5,9].
			if (s1 <= s2 && e1 >= e2) {
				res.add(a);
				count++;
				k++;
				continue;
			}
			// Disjoint: keep the current interval as-is.
			res.add(a);
			count++;

		}
		// A merge consumed two inputs for one output, so count < size means at
		// least one merge happened -> run another pass over the merged list.
		if (size != count) {
			return combine(res);
		} else {
			return result;
		}
	}

	/**
	 * KMP string matching: finds every non-overlapping occurrence of
	 * {@code pattern} in {@code source} (after a full match {@code j} resets
	 * to 0, so overlapping occurrences are skipped).
	 * <p>
	 * NOTE(review): an empty pattern is not supported and will throw
	 * ArrayIndexOutOfBoundsException; callers must pass non-empty patterns.
	 * 
	 * @author 刘建成
	 * @param source text to search in
	 * @param pattern non-empty pattern to search for
	 * @return list of {start, end} pairs, end exclusive
	 */
	public List<int[]> match(String source, String pattern) {
		int i = 0, j = 0;
		char[] src = source.toCharArray();
		char[] ptn = pattern.toCharArray();
		int sLen = src.length;
		int pLen = ptn.length;
		int[] next = getNext(ptn);
		List<int[]> result = new ArrayList<int[]>();
		while (i < sLen) {
			// If j == -1, or the current characters match, advance both.
			if (j == -1 || src[i] == ptn[j]) {
				i++;
				j++;
			} else {
				// Mismatch: keep i, fall back j to next[j] (shifts the
				// pattern right by j - next[j] positions).
				j = next[j];
			}
			if (j == pLen) {
				int[] pos = { i - j, i - j + pLen };
				result.add(pos);
				j = 0;
			}
		}
		return result;
	}

	/**
	 * Highlights every occurrence of {@code keywords} in {@code doc} using the
	 * KMP matcher (case-insensitive), optionally truncating the document
	 * first.
	 * 
	 * @author 刘建成
	 * @param doc full text; may be {@code null}
	 * @param before opening highlight marker
	 * @param after closing highlight marker
	 * @param keywords keywords to highlight
	 * @param maxlength maximum document length; 0 or negative disables truncation
	 * @param around suffix appended after truncation (may be {@code null})
	 * @return the highlighted (and possibly truncated) text, or {@code null}
	 *         when {@code doc} is {@code null}
	 */
	public String highlight(String doc, String before, String after, String[] keywords, int maxlength, String around) {
		if (doc == null) {
			return null;
		}
		if (keywords == null || keywords.length == 0) {
			return doc;
		}
		if (around == null) {
			around = "";
		}
		String doc_ = null;
		int length = doc.length();
		if (maxlength <= 0 || length < maxlength) {
			doc_ = doc;
		} else {
			doc_ = doc.substring(0, maxlength) + around;
		}
		List<int[]> positions = new ArrayList<int[]>();
		String docLow = doc_.toLowerCase();
		for (int i = 0; i < keywords.length; i++) {
			String key = keywords[i];
			// Guard: a null/empty keyword would crash the KMP next-array
			// computation (ArrayIndexOutOfBoundsException on an empty pattern).
			if (key == null || key.isEmpty()) {
				continue;
			}
			positions.addAll(match(docLow, key.toLowerCase()));
		}
		// Sort by start offset; for equal starts put the longer match first.
		positions.sort(new Comparator<int[]>() {

			@Override
			public int compare(int[] o1, int[] o2) {
				// Integer.compare is overflow-safe, unlike subtraction.
				int byStart = Integer.compare(o1[0], o2[0]);
				return byStart == 0 ? Integer.compare(o2[1], o1[1]) : byStart;
			}
		});
		positions = combine(positions);

		return buidHighlight(doc_, positions, before, after);
	}

	// public static void main(String[] args) {
	// long start = System.currentTimeMillis();
	// String source = "(同人CG集) [我本]
	// 女子○初中を時間停止！～ほうかご図書系列を貸出し中fucking出しレ○プ～.zip";
	// for (int i = 0; i < 10; i++) {
	// highlight(source, "<b>", "</b>", new String[] { "我", "本", "初中", "系列",
	// "系", "列", "fuck" }, 1000, "...");
	// }
	// long end = System.currentTimeMillis();
	// System.out.println(end - start);
	// // System.out.println(re);
	// }

	/**
	 * Intentional no-op: the new version no longer pings Google.
	 */
	@Override
	public void pingGoogle() {

	}

	/**
	 * Manual smoke test: queries a hard-coded Sphinx server directly and
	 * prints a server-side highlighted excerpt for each match. Not part of the
	 * production path (the service itself is wired by Spring).
	 */
	public static void main(String[] args) {
		String host = "188.213.134.100";
		int port = 9316;
		String index = "sph_idx_spider_main";
		String word = "Fuck";
		String before = "<span class=\"highlight\">";
		String after = "</span>";
		// Excerpt (highlight) options passed to Sphinx BuildExcerpts.
		Map<String, Object> highlight = new HashMap<String, Object>();
		highlight.put("before_match", before);
		highlight.put("after_match", after);
		highlight.put("chunk_separator", "...");
		// Merge adjacent keywords into a single highlighted phrase.
		highlight.put("exact_phrase", 1);
		SphinxClient sphinxClient = new SphinxClient(host, port);
		sphinxClient.SetConnectTimeout(10000);
		try {
			// Paging range: first 20 matches.
			int fromIndex = 0;
			sphinxClient.SetLimits(fromIndex, 20);
			// Query; without an explicit index all indexes would be searched.
			SphinxResult sphinxResult = sphinxClient.Query(word, index);

			// For every match, ask Sphinx for a highlighted excerpt of the
			// name attribute (positional index 5) and print it.
			for (SphinxMatch m : sphinxResult.getMatches()) {
				String[] names = sphinxClient.BuildExcerpts(new String[] { (String) m.attrValues.get(5) }, index, word,
						highlight);
				System.out.println(names[0]);
			}
		} catch (SphinxException e) {
			// Log instead of printStackTrace so failures use the normal log.
			logger.error("sphinx smoke test failed", e);
		} finally {
			// Always release the connection, even when the query throws.
			sphinxClient.Close();
		}
	}

	/**
	 * Maps one Sphinx match (positional attribute list) onto a
	 * {@link WebTorrentEntity}, highlighting the matched keywords inside the
	 * name. Attribute order in the index: size, vcount, createtime, isvideo,
	 * infohash, name, hsize, files, createdate, lastupdate.
	 * 
	 * @param match a Sphinx result row; may be {@code null}
	 * @param keywords keywords to highlight inside the name
	 * @return the populated entity, or {@code null} for a {@code null} match
	 */
	private WebTorrentEntity convertSimpleData(SphinxMatch match, String[] keywords) {
		if (match == null) {
			return null;
		}
		ArrayList<Object> attrs = match.attrValues;
		String infohash = (String) attrs.get(4);
		String rawName = (String) attrs.get(5);
		WebTorrentEntity bean = new WebTorrentEntity();
		bean.setSize(ObjectUtil.getLong(attrs.get(0)))
				.set_id(infohash)
				.setCreateDate((String) attrs.get(8))
				.setHumanSize((String) attrs.get(6))
				.setInfohash(infohash)
				.setMagnet("magnet:?xt=urn:btih:" + infohash.toUpperCase())
				.setVisitCount(ObjectUtil.getInt(attrs.get(1)))
				.setVideo(ObjectUtil.getInt(attrs.get(3)) > 0)
				.setCreateTime(convertDate(attrs.get(2)))
				// The display name carries the highlight markup; orname keeps
				// the URL-friendly form of the original (unhighlighted) name.
				.setName(highlight(rawName, "<span class=\"highlight\">", "</span>", keywords, 0, null))
				.setOrname(formatName(rawName));
		return bean;
	}

	/**
	 * Full-text search against the Sphinx index with paging and an optional
	 * descending attribute sort.
	 * 
	 * @param name query string
	 * @param sort attribute to sort by descending; {@code null} or "name"
	 *            keeps the default relevance order
	 * @param pageNo 1-based page number
	 * @param pageSize page size
	 * @return a pager with the matched torrents; an empty pager on Sphinx errors
	 * @see com.wowocai.btchina.service.SearchEngineService#search(java.lang.String,
	 *      java.lang.String, int, int)
	 */
	@Override
	public PagerEntity<WebTorrentEntity> search(String name, String sort, int pageNo, int pageSize) {
		SphinxClient sphinxClient = new SphinxClient(host, port);

		try {
			// Default is relevance order; any other sort key becomes a
			// descending attribute sort (SPH_SORT_ATTR_DESC).
			if (sort != null && !"name".equalsIgnoreCase(sort)) {
				sphinxClient.SetSortMode(SphinxClient.SPH_SORT_ATTR_DESC, sort);
			}
			// Paging range.
			int fromIndex = (pageNo - 1) * pageSize;
			sphinxClient.SetLimits(fromIndex, pageSize);
			SphinxResult sphinxResult = sphinxClient.Query(name);
			// Collect the matched keywords for client-side highlighting.
			int size = sphinxResult.words.length;
			String[] keywords = new String[size];
			for (int i = 0; i < size; i++) {
				keywords[i] = sphinxResult.words[i].word;
			}

			List<WebTorrentEntity> result = new ArrayList<WebTorrentEntity>();
			// Convert every matched row; a single bad row must not kill the page.
			for (SphinxMatch m : sphinxResult.getMatches()) {
				try {
					WebTorrentEntity bean = convertSimpleData(m, keywords);
					result.add(bean);
				} catch (Exception e) {
					logger.error("转换数据出错", e);
				}
			}

			long total = sphinxResult.getTotalFound();
			PagerEntity<WebTorrentEntity> page = new PagerEntity<WebTorrentEntity>(total, pageSize);
			page.setResult(result);
			page.setPageNo(pageNo);
			return page;

		} catch (SphinxException e) {
			// Previously swallowed silently; log so search outages are visible.
			logger.error("sphinx query failed, query={}", name, e);
			return new PagerEntity<WebTorrentEntity>(0, pageSize);
		} finally {
			// Best-effort cleanup; a no-op when no persistent connection was
			// opened - TODO confirm against the deployed sphinxapi version.
			sphinxClient.Close();
		}
	}

	/**
	 * Initializes the shared Sphinx client once Spring has injected the
	 * host/port properties.
	 * <p>
	 * NOTE(review): {@code search} builds its own client per call, so this
	 * shared instance currently appears unused by the query path - verify
	 * before relying on it.
	 * 
	 * @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet()
	 */
	@Override
	public void afterPropertiesSet() throws Exception {
		sphinxClient = new SphinxClient(host, port);
		sphinxClient.SetConnectTimeout(5000);
	}

	/**
	 * Strips punctuation/whitespace from a torrent name and joins the
	 * remaining fragments with '-' (used to build URL-friendly names).
	 * 
	 * @author 刘建成
	 * @param name raw torrent name
	 * @return the formatted name; "index" for blank input, "" when the name
	 *         contains no usable characters
	 */
	protected String formatName(String name) {
		if (isBlank(name)) {
			return "index";
		}
		// One quantified class replaces the original "[\\p{Punct}+\\s+]":
		// the literal '+' inside the old class was redundant (\p{Punct}
		// already contains '+'), and quantifying avoids the empty fragments
		// that runs of separators used to produce. Output is unchanged since
		// blank fragments were filtered out anyway.
		String[] names = name.split("[\\p{Punct}\\s]+");
		StringBuilder sb = new StringBuilder();
		for (String str : names) {
			if (!isBlank(str)) {
				// Separator-before-append avoids the trailing '-' the original
				// had to strip off at the end.
				if (sb.length() > 0) {
					sb.append('-');
				}
				sb.append(str);
			}
		}
		String result = sb.toString();
		if (isBlank(result)) {
			return "";
		}
		return result;
	}

	/**
	 * Returns {@code true} when {@code str} is {@code null}, empty, or
	 * consists only of whitespace.
	 */
	private static boolean isBlank(String str) {
		if (str == null) {
			return true;
		}
		return str.trim().isEmpty();
	}
}
