package cn.wx.scholar.neo4j.service;

import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.neo4j.ogm.model.Result;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Sort;
import org.springframework.stereotype.Service;
import cn.wx.common.persistence.exception.MyException;
import cn.wx.common.persistence.web.ResponseCode;
import cn.wx.common.utils.JSONUtils;
import cn.wx.scholar.core.entity.SchFosRelative;
import cn.wx.scholar.core.entity.ScholarExpert;
import cn.wx.scholar.core.entity.ScholarFos;
import cn.wx.scholar.core.entity.ScholarMaxData;
import cn.wx.scholar.core.entity.qo.SchFosRelativeQ;
import cn.wx.scholar.core.entity.qo.ScholarExpertQ;
import cn.wx.scholar.core.entity.qo.ScholarFosQ;
import cn.wx.scholar.core.service.SchFosRelativeService;
import cn.wx.scholar.core.service.ScholarExpertService;
import cn.wx.scholar.core.service.ScholarFosService;
import cn.wx.scholar.core.service.ScholarMaxDataService;
import cn.wx.scholar.neo4j.entity.Expert;
import cn.wx.scholar.neo4j.entity.Neo4jField;
import cn.wx.scholar.neo4j.entity.dto.FieldsExpertCountDTO;
import cn.wx.scholar.neo4j.entity.dto.NIndexDTO;
import cn.wx.scholar.neo4j.entity.dto.NumForYearDTO;
import cn.wx.scholar.neo4j.entity.dto.QueryDTO;
import cn.wx.scholar.neo4j.entity.dto.RelateExpertDTO;
import cn.wx.scholar.neo4j.entity.dto.RelevantExpertDTO;
import cn.wx.scholar.neo4j.entity.qo.ExpertQ;
import cn.wx.scholar.neo4j.repository.ExpertRepository;
import cn.wx.scholar.neo4j.repository.FieldRepository;
import cn.wx.scholar.tool.neo4j.N2OUtils;
import cn.wx.scholar.tool.neo4j.Neo4jQueryTools;

@Service
public class ExpertService extends Neo4jBaseService<Expert> {

	// SLF4J logger for this service.
	Logger logger = LoggerFactory.getLogger(ExpertService.class);

	// Graph repository for field (research-area) nodes.
	@Autowired
	private FieldRepository fieldRepository;
	
	// Graph repository for expert nodes.
	@Autowired
	private ExpertRepository expertRepository;

	// Relational service resolving field-of-study names/ids.
	@Autowired
	private ScholarFosService scholarFosService;

	// Relational service for field-to-field relatedness records.
	@Autowired
	private SchFosRelativeService schFosRelativeService;
	
	// Relational service for scholar/expert records (not referenced in this part of the file).
	@Autowired
	private ScholarExpertService scholarExpertService;
	
	// Relational service providing global maximum statistics used in index normalization.
	@Autowired
	private ScholarMaxDataService scholarMaxDataService;
	
	
	
	/**
	 * Full-text expert search.
	 *
	 * Builds an apoc full-text ("locations") index query, narrowed by an
	 * nIndex range and optional language/country filters, and returns a
	 * sorted page of experts.
	 *
	 * @param expertQ query object carrying the search term, nIndex bounds,
	 *                optional language/country filters, paging and sort info
	 * @return a page of matching experts, or {@code null} when the search term is blank
	 * @throws MyException when {@code expertQ} is null
	 */
	public Page<Expert> loadExperts(ExpertQ expertQ) {

		if (expertQ == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		// Parameters bound into the cypher query.
		Map<String, Object> params = new HashMap<>();

		StringBuilder cypherBuffer = new StringBuilder(); // list query
		StringBuilder countBuffer = new StringBuilder(); // count query

		// Full-text index search prologue/epilogue (apoc "locations" index).
		final String cypher1 = " call apoc.index.search(\"locations\",\"";
		final String cypher2 =  "\",1000000) yield node as p ,weight as w with p ";

		String locationsQ = expertQ.getName();

		if (StringUtils.isBlank(locationsQ)) {
			return null;
		}

		// If the term resolves to a known field of study, switch to the
		// exact-match form of the index query string.
		Long fosId = scholarFosService.queryFosId(locationsQ);

		if (fosId != null && fosId > 0) {
			locationsQ = Neo4jQueryTools.basicIndex(locationsQ);
		}

		cypherBuffer.append(cypher1).append(locationsQ).append(cypher2);

		// Restrict to the requested nIndex range.
		String nIndex = " where p.nIndex>={min} and p.nIndex<{max} ";

		cypherBuffer.append(nIndex);

		params.put("min", expertQ.getMin());
		params.put("max", expertQ.getMax());

		// Optional language filter; "other" means empty/unset language.
		if (expertQ.getLanguage() != null) {
			if ("other".equals(expertQ.getLanguage())) {
				cypherBuffer.append(" AND (p.language = '' or p.language is null)");
			} else {
				cypherBuffer.append(" AND p.language = {language}");
				params.put("language", expertQ.getLanguage());
			}
		}

		// Optional country filter; "other" means empty/unset country.
		if (expertQ.getCountry() != null) {
			if ("other".equals(expertQ.getCountry())) {
				cypherBuffer.append(" AND (p.country = '' or p.country is null)");
			} else {
				cypherBuffer.append(" AND p.country = {country}");
				params.put("country", expertQ.getCountry());
			}
		}

		// Count query shares the filters, with a different RETURN clause.
		countBuffer.append(cypherBuffer.toString());
		countBuffer.append(" RETURN count(ID(p))");
		String countCypher = countBuffer.toString();

		// List query.
		cypherBuffer.append(" RETURN distinct p");
		String cypher = cypherBuffer.toString();

		// Sort on the requested field of node p.
		Sort sort = Neo4jQueryTools.basicSort(expertQ.getOrderType(), "p." + expertQ.getOrderField());

		// Delegate to the base service's paged query.
		return super.queryPage(cypher, countCypher, params,
				new PageRequest(expertQ.getPageSkip(), expertQ.getPageSize(), sort));
	}

	/**
	 * "Find" feature: relation paths between one expert (by id) and another
	 * expert matched by name.
	 *
	 * @param type      "expert" (default when blank); "org" is not implemented
	 * @param expert1Id node id of the first expert
	 * @param name      name of the second expert (matched lowercase)
	 * @return map with "relation" (path rows) and "experts" (the expert nodes
	 *         occurring on the paths), or null for unsupported types
	 * @throws MyException when name is blank
	 */
	public Map<String, Result> getRelationForExpertOrOrg(String type, Long expert1Id, String name) {

		type = StringUtils.isBlank(type) ? "expert" : type;

		if (StringUtils.isBlank(name)) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		// Only expert-to-expert relations are supported; "org" (and anything
		// else) returns null, as before.
		if (!"expert".equals(type)) {
			return null;
		}
		Result relationResult = expertRepository.getRelationForTwoExperts(expert1Id, name.toLowerCase());

		// The relation rows only carry node URIs; every second entry of the
		// "nodes" list is an expert node URI whose trailing segment is the id.
		Set<Long> allExpertIdSet = new HashSet<Long>();
		for (Map<String, Object> map : relationResult) {
			@SuppressWarnings("unchecked")
			Map<String, ArrayList<String>> eMap = (Map<String, ArrayList<String>>) map.get("e");
			ArrayList<String> nodes = eMap.get("nodes");
			for (int i = 0; i < nodes.size(); i += 2) {
				String[] arr = nodes.get(i).split("/");
				allExpertIdSet.add(Long.parseLong(arr[arr.length - 1]));
			}
		}
		// Idiomatic Set -> array conversion (replaces the manual Iterator copy).
		Long[] ids = allExpertIdSet.toArray(new Long[0]);
		Result expertsResult = expertRepository.getExpertByIds(ids);

		Map<String, Result> resultMap = new HashMap<String, Result>();
		resultMap.put("relation", relationResult);
		resultMap.put("experts", expertsResult);
		return resultMap;
	}


	/**
	 * Ranks experts over one or more fields of study.
	 *
	 * For a single field: the top 10 experts ordered by rank. For multiple
	 * fields: a rank-ordered candidate pool (20 per field) is collected first,
	 * then the top 10 by summed paper count across the fields is returned.
	 *
	 * @param queryFos field-of-study names
	 * @return result rows (name/id/paperCount), or null when queryFos is empty
	 */
	public Result expertFosQz(List<String> queryFos) {

		if (queryFos == null || queryFos.isEmpty()) {
			return null;
		}

		// Resolve field names to field ids.
		List<Long> fosfIds = scholarFosService.queryFosIdByName(queryFos);

		final String match_ = "match(f1:field)-[r:containse]-(p:expert) where (";
		final String end_ = ") and p.nIndex>=-1 and p.nIndex<101 and r.rank > 0";

		// OR-ed field-id predicate, e.g. " f1.fId=1 or f1.fId=2"
		// (replaces the manual first-iteration flag; produces identical text).
		List<String> conds = new ArrayList<>();
		for (Long s : fosfIds) {
			conds.add("f1.fId=" + s);
		}
		String fidWhere = " " + String.join(" or ", conds);

		StringBuilder sb = new StringBuilder(match_).append(fidWhere);
		StringBuilder sb2 = new StringBuilder(match_).append(fidWhere);

		// Single field: order directly by rank.
		if (queryFos.size() == 1) {
			sb.append(end_).append(
					" return p.name as name,id(p) as id,r.rank as paperCount order by r.rank asc limit 10 ");
			return super.queryListResult(sb.toString(), null);
		}

		// Multiple fields: collect a rank-ordered candidate pool first.
		sb.append(end_).append(" return ID(p) as id order by r.rank asc limit " + fosfIds.size() * 20);
		Result re = super.queryListResult(sb.toString(), null);

		Map<String, Object> params = new HashMap<>();

		Set<Integer> expertIds = new HashSet<>();
		re.forEach((v) -> expertIds.add((Integer) v.get("id")));

		// Then rank the pool by total paper count over the requested fields.
		sb2.append(end_).append(" and ID(p) in {expertIds} ");
		sb2.append(" return p.name as name,id(p) as id,sum(r.paperCount) as paperCount "
				+ " order by paperCount desc limit 10");

		params.put("expertIds", expertIds);
		return super.queryListResult(sb2.toString(), params);
	}

	/**
	 * When the search term is not an exact field name, full-text search the
	 * experts and pick the (non top-level) field containing the most of them.
	 *
	 * @param locationsQ search term
	 * @return a QueryDTO flagged queryIsFos=true when the term IS a known
	 *         field; otherwise the field with the most experts (queryIsFos set
	 *         to false), or null when blank / nothing found
	 */
	public QueryDTO sureFosHasMaxExperts(String locationsQ) {

		if (StringUtils.isBlank(locationsQ)) {
			return null;
		}

		// If the term resolves directly to a field id, the caller can use it.
		Long fosId = scholarFosService.queryFosId(locationsQ);

		if (fosId != null && fosId > 0) {
			return new QueryDTO(true);
		}

		final String cypher1 = " call apoc.index.search(\"locations\",\"";
		final String cypher2 =  "\",1000000) yield node as p ,weight as w with p ";
		final String match1 = " match(f:field)-[r:containse]->(e:expert) ";

		// Wildcard search around the term, joined to fields via containse,
		// returning the single field with the highest distinct expert count.
		StringBuilder cypherBuffer = new StringBuilder();
		cypherBuffer.append(cypher1).append("*" + locationsQ + "*").append(cypher2).append(match1);
		cypherBuffer.append(" where ID(e)=ID(p) and p.nIndex>=0 and p.nIndex<101 and f.topLevel<>1");
		cypherBuffer.append(" RETURN distinct f.name as fosName,count(distinct ID(e)) as expertCounts");
		cypherBuffer.append(" order by expertCounts desc limit 1");

		Result result1 = super.queryListResult(cypherBuffer.toString(), null);

		List<QueryDTO> l = covetResultToDTO(result1, QueryDTO.class);
		QueryDTO d = null;
		if (l != null && l.size() > 0) {
			d = l.get(0);
			d.setQueryIsFos(false);
		}
		return d;
	}
	
	
	
	/**
	 * New clustering: resolve a field name to its related fields, then
	 * decorate each related field with its relatedness value and expert count.
	 *
	 * @param fName field name (required)
	 * @return related fields with fval/scholarNum filled in, or null when the
	 *         name does not resolve to a field (or no relatives exist)
	 * @throws MyException when fName is blank
	 */
	public List<ScholarFos> expertUnwind2(String fName) {

		if (StringUtils.isBlank(fName)) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		Long fosId = scholarFosService.queryFosId(fName);

		// Unknown field name: nothing to cluster.
		if (fosId == null) {
			return null;
		}

		SchFosRelativeQ relativeQuery = new SchFosRelativeQ();
		relativeQuery.setFosId(fosId);
		List<SchFosRelative> relatives = schFosRelativeService.queryList(relativeQuery);

		List<ScholarFos> decorated = null;

		if (relatives != null) {

			List<Long> relatedIds = new ArrayList<>();
			Map<Long, Double> valueById = new HashMap<>();
			Map<Long, Integer> countById = new HashMap<>();
			for (SchFosRelative relative : relatives) {
				relatedIds.add(relative.getRelativeId());
				valueById.put(relative.getRelativeId(), relative.getFval());
			}

			// Expert count per related field, from the graph side.
			Result countResult = fieldRepository.getFieldsExpertCount(relatedIds);
			List<FieldsExpertCountDTO> counts = N2OUtils.covetResultToDTO(countResult, FieldsExpertCountDTO.class);
			for (FieldsExpertCountDTO countDto : counts) {
				countById.put(countDto.getfId(), countDto.getEcount());
			}

			// Load the field entities and attach relatedness value and count.
			ScholarFosQ fosQuery = new ScholarFosQ();
			fosQuery.setpIds(relatedIds);
			decorated = scholarFosService.queryList(fosQuery);

			if (decorated != null) {
				for (ScholarFos fos : decorated) {
					fos.setFval(valueById.get(fos.getpId()));
					fos.setScholarNum(countById.get(fos.getpId()));
				}
			}
		}

		return decorated;
	}
	

	/**
	 * nIndex histogram: counts experts matching the search term in four
	 * nIndex buckets ([0,10), [10,50), [50,80), [80,∞)) via UNION ALL.
	 *
	 * The four previously copy-pasted query segments are now generated from a
	 * bucket table; the resulting cypher string is identical.
	 *
	 * @param expertQ carries the search term
	 * @return raw query results (one nIndexSum row per bucket), or null when
	 *         the term is blank or the query yields nothing
	 * @throws MyException when expertQ is null
	 */
	public Object getIndexNum(ExpertQ expertQ) {

		if (expertQ == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		final String cypher1 = " call apoc.index.search(\"locations\",\"";
		final String cypher2 =  "\",1000000) yield node as p ,weight as w with p ";

		String locationsQ = expertQ.getName();

		if (StringUtils.isBlank(locationsQ)) {
			return null;
		}

		// If the term is an exact field name, use the exact-match index form.
		Long fosId = scholarFosService.queryFosId(locationsQ);

		if (fosId != null && fosId > 0) {
			locationsQ = Neo4jQueryTools.basicIndex(locationsQ);
		}

		// One UNION ALL segment per nIndex bucket; the last bucket is open-ended.
		String[] buckets = {
				" where p.nIndex>=0 and p.nIndex<10 ",
				" where p.nIndex>=10 and p.nIndex<50 ",
				" where p.nIndex>=50 and p.nIndex<80 ",
				" where p.nIndex>=80 "
		};

		StringBuilder cypherBuffer = new StringBuilder();
		for (int i = 0; i < buckets.length; i++) {
			if (i > 0) {
				cypherBuffer.append(" union all");
			}
			cypherBuffer.append(cypher1).append(locationsQ).append(cypher2);
			cypherBuffer.append(buckets[i]).append(" return count(distinct p) as nIndexSum");
		}

		Result result = super.queryListResult(cypherBuffer.toString(), null);
		if (result == null) {
			return null;
		}
		return result.queryResults();
	}

	/**
	 * Loads a single expert node by its Neo4j node id.
	 *
	 * @param id Neo4j node id
	 * @return the expert as returned by the repository
	 */
	public Expert findById(Long id) {
		return expertRepository.findById(id);
	}

	/**
	 * Collects cooperating experts of the given expert, grouped by the kind of
	 * shared work: papers ("publish"), books ("write") and patents ("patent").
	 *
	 * @param id Neo4j node id of the expert
	 * @return map from cooperation kind to cooperating experts
	 */
	public Map<String, List<Expert>> getRelateExpert(Long id) {
		Map<String, List<Expert>> queryParams = new HashMap<>();
		queryParams.put("publish", expertRepository.getRelateExpert1(id)); // papers
		queryParams.put("write", expertRepository.getRelateExpert2(id)); // books
		queryParams.put("patent", expertRepository.getRelateExpert3(id)); // patents (original comment said "papers" — likely a copy-paste slip)
		return queryParams;
	}

	/**
	 * Similar-expert algorithm #2: cosine similarity over per-field paper counts.
	 *
	 * Candidate experts come from three sources: top-ranked experts in the
	 * target expert's fields, plus cooperating experts on papers, books and
	 * patents. Each candidate is scored by the cosine similarity between its
	 * paper-count vector and the target expert's vector over the target's
	 * fields; the top 6 are returned (best first).
	 *
	 * BUGFIX: the self-exclusion check compared boxed Longs with '==', which
	 * only holds inside the small-value cache, so for real node ids the target
	 * expert was compared against itself (similarity 1.0) and could appear in
	 * its own result list. Now uses equals().
	 *
	 * @param paramId Neo4j node id of the target expert
	 * @return up to 6 most similar experts
	 * @throws MyException when paramId is null or the expert has no fields
	 */
	public List<RelevantExpertDTO> getRelateExpert1(Long paramId) {

		if (paramId == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		// Fields the target expert belongs to.
		List<Neo4jField> fields = fieldRepository.getFieldsByExpertId(paramId);

		if (fields == null || fields.size() == 0) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		List<Long> fosfIds = new ArrayList<>();
		for (Neo4jField nf : fields) {
			fosfIds.add(nf.getfId());
		}

		final String match_ = "match(f1:field)-[r:containse]-(p:expert) where (";
		final String end_ = ") and p.nIndex>=-1 and p.nIndex<101 ";

		String return1 = " and r.rank > 0 return ID(p) as id order by r.rank asc limit " + fosfIds.size() * 20;

		// Shared OR-ed field-id predicate (identical text to the old loop).
		List<String> conds = new ArrayList<>();
		for (Long s : fosfIds) {
			conds.add("f1.fId=" + s);
		}
		String fidWhere = " " + String.join(" or ", conds);

		StringBuilder sb = new StringBuilder(match_).append(fidWhere);
		StringBuilder sb2 = new StringBuilder(match_).append(fidWhere);

		// Rank-ordered candidates across the target's fields.
		sb.append(end_).append(return1);
		Result re = super.queryListResult(sb.toString(), null);

		Map<String, Object> params = new HashMap<>();
		Set<Long> expertIds = new HashSet<>();
		expertIds.add(paramId);
		re.forEach((v) -> expertIds.add(Long.parseLong(v.get("id").toString())));

		// Add cooperating experts (papers, books, patents) to the candidate set.
		addExpertIds(expertIds, expertRepository.getRelateExpert1(paramId));
		addExpertIds(expertIds, expertRepository.getRelateExpert2(paramId));
		addExpertIds(expertIds, expertRepository.getRelateExpert3(paramId));

		// Paper counts per (expert, field) for the whole candidate set.
		sb2.append(end_).append(" and ID(p) in {expertIds} ");
		sb2.append(" return p.name as ename,ID(p) as eId,f1.name as fname,f1.fId as fiId,r.paperCount as paperCount ");
		params.put("expertIds", expertIds);

		Result repc = super.queryListResult(sb2.toString(), params);

		List<RelevantExpertDTO> relds = N2OUtils.covetResultToDTO(repc, RelevantExpertDTO.class);

		// Similarity per candidate.
		Map<Long, Double> relevant = new HashMap<>();

		// "<expertId>_<fieldId>" -> paperCount matrix, plus one DTO per expert.
		Map<String, Integer> map = new HashMap<>();
		Map<Long, RelevantExpertDTO> map_dto = new HashMap<>();
		for (RelevantExpertDTO ro : relds) {
			map.put(ro.geteId() + "_" + ro.getFiId(), ro.getPaperCount());
			map_dto.put(ro.geteId(), ro);
		}

		// Cosine similarity between the target's and each candidate's vector.
		for (Long eId : expertIds) {

			try {
				// BUGFIX: was 'paramId == eId' (reference comparison on Long).
				if (paramId.equals(eId)) {
					continue;
				}

				double sumFirst = 0D;
				double sumSecond = 0D;
				double sumMulti = 0D;

				for (Long s : fosfIds) {
					Integer t1 = map.get(paramId + "_" + s);
					Integer t2 = map.get(eId + "_" + s);
					int v1 = (t1 == null ? 0 : t1);
					int v2 = (t2 == null ? 0 : t2);
					sumMulti += v1 * v2;
					sumFirst += v1 * v1;
					sumSecond += v2 * v2;
				}

				// A zero vector on either side means similarity 0.
				if (sumFirst <= 0 || sumSecond <= 0) {
					relevant.put(eId, 0D);
					continue;
				}

				relevant.put(eId, sumMulti / Math.sqrt(sumFirst * sumSecond));
			} catch (Exception e) {
				logger.info("this is people ：" + eId);
				logger.info(e.getMessage());
			}
		}

		// Sort ascending by similarity, then walk backwards for the top 6.
		Map<Long, Double> sortMap = N2OUtils.mapSortByValue(relevant);

		ListIterator<Map.Entry<Long, Double>> fg =
				new ArrayList<Map.Entry<Long, Double>>(sortMap.entrySet()).listIterator(sortMap.size());
		int ri = 0;
		List<RelevantExpertDTO> result = new ArrayList<>();

		while (fg.hasPrevious()) {
			Map.Entry<Long, Double> entry = fg.previous();
			if (ri > 5) {
				break;
			}
			// NOTE(review): map_dto may lack some candidate ids, so null entries
			// can be added here — behavior kept as before; confirm upstream.
			result.add(map_dto.get(entry.getKey()));
			ri++;
		}

		return result;
	}

	/**
	 * Adds the ids of the given experts to the accumulator set (null-safe).
	 */
	private void addExpertIds(Set<Long> expertIds, List<Expert> experts) {
		if (experts != null) {
			for (Expert e : experts) {
				expertIds.add(e.getId());
			}
		}
	}
	
	/**
	 * Language facet for the expert search: counts experts per language for
	 * the given search term and nIndex range (empty/null languages excluded).
	 *
	 * @param expertQ carries the search term and nIndex bounds
	 * @return rows of (language, num) ordered by language, or null when the
	 *         term is blank
	 * @throws MyException when expertQ is null
	 */
	public Object getLanguageInfo(ExpertQ expertQ) {

		if (expertQ == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		Map<String, Object> params = new HashMap<>();

		final String cypher1 = " call apoc.index.search(\"locations\",\"";
		final String cypher2 =  "\",1000000) yield node as p ,weight as w with p ";

		String locationsQ = expertQ.getName();

		if (StringUtils.isBlank(locationsQ)) {
			return null;
		}

		// If the term is an exact field name, use the exact-match index form.
		Long fosId = scholarFosService.queryFosId(locationsQ);

		if (fosId != null && fosId > 0) {
			locationsQ = Neo4jQueryTools.basicIndex(locationsQ);
		}

		StringBuilder cypherBuffer = new StringBuilder();
		cypherBuffer.append(cypher1).append(locationsQ).append(cypher2);
		cypherBuffer.append(" where p.nIndex>={min} and p.nIndex<{max} and p.language is not null and p.language<>'' ");
		cypherBuffer.append(" return p.language as language,count(p.language) as num order by p.language ");

		params.put("min", expertQ.getMin());
		params.put("max", expertQ.getMax());

		return super.queryListResult(cypherBuffer.toString(), params);
	}

	/**
	 * Country facet for the expert search: counts experts per country for the
	 * given search term and nIndex range (empty/null countries excluded).
	 *
	 * @param expertQ carries the search term and nIndex bounds
	 * @return rows of (country, num) ordered by country, or null when the
	 *         term is blank
	 * @throws MyException when expertQ is null
	 */
	public Object getCountryInfo(ExpertQ expertQ) {

		if (expertQ == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		String locationsQ = expertQ.getName();
		if (StringUtils.isBlank(locationsQ)) {
			return null;
		}

		// Exact field names switch to the exact-match index form of the term.
		Long fosId = scholarFosService.queryFosId(locationsQ);
		if (fosId != null && fosId > 0) {
			locationsQ = Neo4jQueryTools.basicIndex(locationsQ);
		}

		// Full-text search, nIndex-bounded, grouped and ordered by country.
		String cypher = " call apoc.index.search(\"locations\",\""
				+ locationsQ
				+ "\",1000000) yield node as p ,weight as w with p "
				+ " where p.nIndex>={min} and p.nIndex<{max} and p.country is not null and p.country<>'' "
				+ " return p.country as country,count(p.country) as num order by p.country ";

		Map<String, Object> params = new HashMap<>();
		params.put("min", expertQ.getMin());
		params.put("max", expertQ.getMax());

		return super.queryListResult(cypher, params);
	}

	/**
	 * Second-level page: cooperation relations among experts matched by name.
	 *
	 * @param param expert name fragment (required)
	 * @param num   maximum number of experts to match
	 * @return cooperation relation rows for the matched experts
	 * @throws MyException when param is blank
	 */
	public Result getRelateExpertParam2(String param, Integer num) {

		if (StringUtils.isBlank(param)) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		List<Expert> experts = expertRepository.getAllExpertByName(Neo4jQueryTools.basicLike(param), num);

		// Collect the matched experts' node ids (idiomatic array declaration).
		Long[] expertsIds = new Long[experts.size()];
		for (int i = 0; i < experts.size(); i++) {
			expertsIds[i] = experts.get(i).getId();
		}

		return expertRepository.getRelateExpertByIds(expertsIds);
	}

	/**
	 * Cooperating experts for a search term.
	 *
	 * Searches the top experts for the term, loads the book/paper/patent
	 * cooperation relations among them, and — when fewer than limitNum
	 * relations exist — pads the result with matched experts that appear in
	 * no relation.
	 *
	 * BUGFIX: the padding loop used to iterate expertList even when the search
	 * returned null, causing a NullPointerException; now guarded.
	 *
	 * @param param    search term (required)
	 * @param limitNum maximum number of experts/relations wanted
	 * @return cooperation DTOs (relations first, then padding experts)
	 * @throws MyException when param is blank
	 */
	public List<RelateExpertDTO> getRelateExpertParam(String param, Integer limitNum) {

		if (StringUtils.isBlank(param)) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		final String cypher1 = " call apoc.index.search(\"locations\",\"";
		final String cypher2 =  "\",1000000) yield node as p ,weight as w with p ";

		String locationsQ = param;

		// If the term is an exact field name, use the exact-match index form.
		Long fosId = scholarFosService.queryFosId(locationsQ);

		if (fosId != null && fosId > 0) {
			locationsQ = Neo4jQueryTools.basicIndex(locationsQ);
		}

		StringBuilder cypherBuffer = new StringBuilder();
		cypherBuffer.append(cypher1).append(locationsQ + "*").append(cypher2);
		cypherBuffer.append(" where p.nIndex>=0 and p.nIndex<101 ");
		cypherBuffer.append("return distinct p order by p.nIndex desc limit " + limitNum);

		List<Expert> expertList = super.queryList(cypherBuffer.toString(), null);

		List<RelateExpertDTO> result = new ArrayList<>();

		if (expertList != null) {
			Set<Long> expertsIds = new HashSet<>();
			for (Expert e : expertList) {
				expertsIds.add(e.getId());
			}
			// Cooperation relations among the matched experts, per work type.
			result.addAll(covetResultToDTO(expertRepository.getRelateBookExpertByIds(expertsIds), RelateExpertDTO.class));
			result.addAll(covetResultToDTO(expertRepository.getRelatePaperExpertByIds(expertsIds), RelateExpertDTO.class));
			result.addAll(covetResultToDTO(expertRepository.getRelatePatentsExpertByIds(expertsIds), RelateExpertDTO.class));
		}

		// Prefer experts with cooperation relations; when there are too few,
		// pad with matched experts that occur in no relation.
		if (expertList != null && result.size() < limitNum) {

			Set<Long> reIds = new HashSet<>();
			for (RelateExpertDTO rd : result) {
				reIds.add(rd.geteId());
				reIds.add(rd.getrId());
			}

			for (Expert e : expertList) {
				if (!reIds.contains(e.getId())) {
					result.add(new RelateExpertDTO(e.getId(), e.getName(), e.getPnum()));
				}
			}
		}

		return result;
	}
	
	/**
	 * Converts a Neo4j {@link Result} into a list of DTOs by round-tripping
	 * the rows through JSON.
	 *
	 * @param rt query result (may be null)
	 * @param c  target DTO class
	 * @return the converted list, or null when rt is null
	 */
	@SuppressWarnings("unchecked")
	private <M> List<M> covetResultToDTO(Result rt, Class<M> c) {
		if (rt == null) {
			return null;
		}
		return (List<M>) JSONUtils.pa(JSONUtils.c(rt), c);
	}
	
	
	/**
	 * Loads the experts attached to an organisation.
	 *
	 * @param orgId Neo4j node id of the organisation (required)
	 * @return the organisation's experts, as returned by the repository
	 * @throws MyException when orgId is null
	 */
	public List<Expert> getExpertByOrg(Long orgId) {
		if (orgId == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}
		return expertRepository.getExpertByOrg(orgId);
	}

	/**
	 * Experts in an organisation that belong to a given field of study.
	 *
	 * Removes a dead store from the original: the basicIndex() rewrite of
	 * locationsQ was never used (only the resolved field id feeds the query).
	 *
	 * @param orgId      Neo4j node id of the organisation (required)
	 * @param locationsQ field-of-study name, resolved to a field id
	 * @return up to 1000 matching experts
	 * @throws MyException when orgId is null
	 */
	public List<Expert> getExpertByOrgAndFos(Long orgId, String locationsQ) {

		if (orgId == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		// Resolve the field name; the id (not the name) drives the query.
		// NOTE(review): when the name does not resolve, fId is bound to null
		// and the query matches nothing — confirm that is intended.
		Long fosId = scholarFosService.queryFosId(locationsQ);

		final String cypher = " match(o:org)-[r:jobin]->(e:expert)-[r1:containse]-(f:field) "
				+ " where e.nIndex>=-1 and e.nIndex<101 and "
				+ " ID(o)={orgId} and f.fId={fId} "
				+ " RETURN distinct e limit 1000 ";

		Map<String, Object> params = new HashMap<>();
		params.put("fId", fosId);
		params.put("orgId", orgId);

		return super.queryList(cypher, params);
	}

	/**
	 * Cooperating experts within an organisation.
	 *
	 * Loads up to limitNum experts of the organisation, collects their
	 * book/paper/patent cooperation relations, and pads the result with
	 * relation-less experts when fewer than limitNum relations exist.
	 *
	 * BUGFIX: the padding loop used to iterate expertList even when the
	 * repository returned null, causing a NullPointerException; now guarded.
	 *
	 * @param orgId    Neo4j node id of the organisation (required)
	 * @param limitNum maximum number of experts to consider
	 * @return cooperation DTOs (relations first, then padding experts)
	 * @throws MyException when orgId is null
	 */
	public List<RelateExpertDTO> getRelateExpertByOrg(Long orgId, Integer limitNum) {

		if (orgId == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}

		// 1. If the org has more than limitNum experts, only relations among
		//    the loaded experts are shown.
		// 2. Otherwise the result is padded (below) with experts that have no
		//    cooperation relation.
		List<Expert> expertList = expertRepository.getAllExpertByOrgName(orgId, limitNum);

		List<RelateExpertDTO> result = new ArrayList<>();

		if (expertList != null) {

			Set<Long> expertsIds = new HashSet<>();
			for (Expert e : expertList) {
				expertsIds.add(e.getId());
			}
			// Cooperation relations among the org's experts, per work type.
			result.addAll(covetResultToDTO(expertRepository.getRelateBookExpertByIds(expertsIds), RelateExpertDTO.class));
			result.addAll(covetResultToDTO(expertRepository.getRelatePaperExpertByIds(expertsIds), RelateExpertDTO.class));
			result.addAll(covetResultToDTO(expertRepository.getRelatePatentsExpertByIds(expertsIds), RelateExpertDTO.class));
		}

		// Prefer experts with cooperation relations; pad with the rest when short.
		if (expertList != null && result.size() < limitNum) {

			Set<Long> reIds = new HashSet<>();
			for (RelateExpertDTO rd : result) {
				reIds.add(rd.geteId());
				reIds.add(rd.getrId());
			}

			for (Expert e : expertList) {
				if (!reIds.contains(e.getId())) {
					result.add(new RelateExpertDTO(e.getId(), e.getName(), e.getPnum()));
				}
			}
		}

		return result;
	}

	/**
	 * Computes the year-by-year nIndex trend for an expert.
	 *
	 * Pulls per-year work counts (journal, conference, book, patent, patent
	 * citation) from the graph, then for every year in the expert's active
	 * span computes the a-index, i-index, p-index and combined n-index (both
	 * per-year and cumulative), normalized against global maxima.
	 *
	 * NOTE(review): relies on computeAindex/computeIindex/computeNindex_/
	 * computeYear/trend/formatDouble4, which are defined elsewhere in this
	 * class and not visible here.
	 *
	 * @param fId expert id used by the graph repositories
	 * @return a ScholarExpert carrying the serialized per-year index,
	 *         trend and per-type count series
	 */
	public ScholarExpert computeNindex(Long fId) {
	
		/**
		 * Global maxima used for normalizing the individual indexes.
		 */
		ScholarMaxData maxData = scholarMaxDataService.queryOne();
		
		/**
		 * The expert node from neo4j.
		 */
		Expert expert = expertRepository.findByfId(fId);
		
		// Per-year counts keyed "<year>_<type>", e.g. "2010_Journal".
		Map<String,Integer> dataMap = new HashMap<>();
		
		/**
		 * Journal papers per year.
		 */
		Result journalResult = expertRepository.getJournalByYear(fId);
		List<NumForYearDTO> journalDto = covetResultToDTO(journalResult, NumForYearDTO.class);
		for(NumForYearDTO nd : journalDto) {
			dataMap.put(nd.getYear() + "_" + nd.getType(), nd.getC());
		}
		
		/**
		 * Conference papers per year.
		 */
		Result conferenceResult = expertRepository.getConferenceByYear(fId);
		List<NumForYearDTO> conferenceDto = covetResultToDTO(conferenceResult, NumForYearDTO.class);
		for(NumForYearDTO nd : conferenceDto) {
			dataMap.put(nd.getYear() + "_" + nd.getType(), nd.getC());
		}
		/**
		 * Books per year.
		 */
		Result bookResult = expertRepository.getBookByYear(fId);
		List<NumForYearDTO> bookDto = covetResultToDTO(bookResult, NumForYearDTO.class);
		for(NumForYearDTO nd : bookDto) {
			dataMap.put(nd.getYear() + "_" + nd.getType(), nd.getC());
		}
		/**
		 * Patents per year.
		 */
		Result patentResult = expertRepository.getPatentByYear(fId);
		List<NumForYearDTO> patentDto = covetResultToDTO(patentResult, NumForYearDTO.class);
		for(NumForYearDTO nd : patentDto) {
			dataMap.put(nd.getYear() + "_" + nd.getType(), nd.getC());
		}
		/**
		 * Patent citations per year.
		 */
		Result patentCitReslut = expertRepository.getPatentCitByYear(fId);
		List<NumForYearDTO> patentCitDto = covetResultToDTO(patentCitReslut, NumForYearDTO.class);
		for(NumForYearDTO nd : patentCitDto) {
			dataMap.put(nd.getYear() + "_" + nd.getType(), nd.getC());
		}
		
		/* Determine the earliest and latest active year across all work types.
		 * 
		 */
		
		Integer[] b = computeYear(0, 0, journalDto);
		b = computeYear(b[0], b[1], conferenceDto);
		b = computeYear(b[0], b[1], bookDto);
		b = computeYear(b[0], b[1], patentDto);
		b = computeYear(b[0], b[1], patentCitDto);
		int minYear = b[0],maxYear = b[1];
		
		/**
		 * No min/max year found: default to a two-year window
		 * ending at last year.
		 */
		if(minYear == 0 || maxYear == 0) {
			Calendar cale = Calendar.getInstance();  
	        int year = cale.get(Calendar.YEAR);
	        minYear = year - 2;
	        maxYear = year - 1;
		}
		
		/**
		 * For the a-index only pnum and bnum can be tallied per year;
		 * the remaining inputs stay fixed at the expert's stored totals.
		 */
		Integer hindex = expert.gethIndex() == null ? 0 : expert.gethIndex(), 
			maxhindex = maxData.gethIndex(), 
			pnum = 0, 
			maxpnum = maxData.getPnum(), 
			citations = expert.getCitations() == null ? 0 : expert.getCitations(), 
			maxcitations = maxData.getCitations(), 
			bnum = 0, 
			maxbnum = maxData.getBnum(),
			jnum = 0,
			cnum = 0,
			cumapnum = 0,
			cumabnum = 0,
			interval = (maxYear - minYear),
			
			/**
			 * Patent figures that CAN be tallied per year: panum, pacit.
			 */
			cumapanum = 0,
			cumapacit = 0,
			panum = 0, 
			maxpaNum = maxData.getPatnum(), 
			pacit = 0, 
			maxpatentsCit = maxData.getPatcitnum(), 
			efpData = expert.getEfpData() == null ? 0 : expert.getEfpData(), 
			maxefpData = maxData.getMaxefpData(), 
			eqWorkData = expert.getEqWorkData() == null ? 0 : expert.getEqWorkData(), 
			maxeqWorkData = maxData.getMaxeqWorkData(), 
			eqWork4Data = expert.getEqWork4Data()  == null ? 0 : expert.getEqWork4Data(), 
			maxeqWork4Data = maxData.getMaxeqWork4Data();
		/**
		 * Running index values.
		 * NOTE(review): interval is 0 when minYear == maxYear, making pindex
		 * Infinity (double division, no exception) — confirm this is intended.
		 */
		Double aindex = 0D,
			cumaindex = 0D,
			cumpindex = 0D,
			epindex = expert.getpIndex() == null ? 0D : expert.getpIndex(),
			eaindex = expert.getaIndex() == null ? 0D : expert.getaIndex(),
			eiindex = expert.getiIndex() == null ? 0D : expert.getiIndex(),
			pindex = epindex / interval,
			iIndex = 0D,
			cumiIndex = 0D,
			nIndex = 0D,
			cumnIndex = 0D;
		
		/**
		 * Result series to return.
		 */
		List<Integer> yearData = new ArrayList<>();
		List<Double> nIndexData = new ArrayList<>();
		List<Double> cumnIndexData = new ArrayList<>();
		List<Double> aIndexData = new ArrayList<>();
		List<Double> iIndexData = new ArrayList<>();
		
		/**
		 * Papers - journal.
		 */
		List<Object> ny0 = null;
		/**
		 * Papers - conference.
		 */
		List<Object> ny1 = null;
		/**
		 * Books.
		 */
		List<Object> ny2 = null;
		/**
		 * Patents.
		 */
		List<Object> ny3 = null;
		
		List<List<Object>> nys0 = new ArrayList<>();
		List<List<Object>> nys1 = new ArrayList<>();
		List<List<Object>> nys2 = new ArrayList<>();
		List<List<Object>> nys3 = new ArrayList<>();
		
		for(int i = minYear;i <= maxYear;i++) {
			
			ny0 = new ArrayList<>();
			/**
			 * Papers - conference.
			 */
			ny1 = new ArrayList<>();
			/**
			 * Books.
			 */
			ny2 = new ArrayList<>();
			/**
			 * Patents.
			 */
			ny3 = new ArrayList<>();

			/**
			 * a-index inputs for this year.
			 */
			jnum = dataMap.get(i + "_Journal");
			cnum = dataMap.get(i + "_Conference");
			bnum = dataMap.get(i + "_book");
			
			jnum = (jnum == null)?0:jnum;
			cnum = (cnum == null)?0:cnum;
			bnum = (bnum == null)?0:bnum;
			pnum = jnum + cnum;

			ny0.add(String.valueOf(i));
			ny0.add(jnum);
			ny0.add("Journal");
			nys0.add(ny0);
			
			ny1.add(String.valueOf(i));
			ny1.add(cnum);
			ny1.add("Conference");
			nys1.add(ny1);
			
			ny2.add(String.valueOf(i));
			ny2.add(bnum);
			ny2.add("book");
			nys2.add(ny2);
			
			aindex = computeAindex(hindex, maxhindex, pnum, maxpnum, 
					citations, maxcitations, bnum, maxbnum);
			
			aIndexData.add(aindex);
			
			cumapnum += pnum;
			cumabnum += bnum;
			
			/**
			 * a-index over the cumulative counts so far.
			 */
			cumaindex = computeAindex(hindex, maxhindex, cumapnum, maxpnum, 
					citations, maxcitations, cumabnum, maxbnum);
			
			
			/**
			 * p-index for this year: it cannot be tallied per year, so the
			 * expert's total p-index is spread evenly over the year span
			 * (epindex / interval per year), capped at the stored total.
			 */
			cumpindex += pindex;
			if(epindex !=0 && cumpindex > epindex) {cumpindex = epindex;}
			
			/**
			 * i-index inputs for this year.
			 */
			panum = dataMap.get(i + "_Patent");
			pacit = dataMap.get(i + "_PatentCit");
			panum = (panum == null)?0:panum;
			pacit = (pacit == null)?0:pacit;
			
			ny3.add(String.valueOf(i));
			ny3.add(panum);
			ny3.add("Patent");
			nys3.add(ny3);
			
			/**
			 * i-index for this year.
			 */
			iIndex = computeIindex(panum, maxpaNum, pacit, maxpatentsCit, 
					efpData, maxefpData, eqWorkData, maxeqWorkData, eqWork4Data, maxeqWork4Data);
			
			iIndexData.add(iIndex);
			
			cumapanum += panum;
			cumapacit += pacit;
			
			/**
			 * i-index over the cumulative counts so far.
			 */
			cumiIndex = computeIindex(cumapanum, maxpaNum, cumapacit, maxpatentsCit, 
					efpData, maxefpData, eqWorkData, maxeqWorkData, eqWork4Data, maxeqWork4Data);

			/**
			 * n-index for this year.
			 */
			nIndex =computeNindex_(aindex, iIndex, pindex);
			
			// Cap the cumulative a-/i-index at the expert's stored totals.
			if(eaindex !=0 && cumaindex > eaindex) {cumaindex = eaindex;}
			if(eiindex !=0 && cumiIndex > eiindex) {cumiIndex = eiindex;}
			
			/**
			 * Cumulative n-index.
			 */
			cumnIndex = computeNindex_(cumaindex, cumiIndex, cumpindex);
			
			logger.info("my test index : " + cumaindex + "-" + cumiIndex + "-" + cumpindex);
			
			yearData.add(i);
			nIndexData.add(formatDouble4(nIndex));
			cumnIndexData.add(formatDouble4(cumnIndex));
		}
		
		// Trend value per index series (computed by trend(); last slot fixed 0).
        List<Integer> trendList = new ArrayList<Integer>();
        trendList.add(trend(nIndexData));
        trendList.add(trend(aIndexData));
        trendList.add(trend(iIndexData));
        trendList.add(0);
		
        nys0.addAll(nys1);
        nys0.addAll(nys2);
        nys0.addAll(nys3);
        
		NIndexDTO no = new NIndexDTO();
		no.setLineYear(yearData);
		no.setLineData(nIndexData);
		no.setSumArrays(cumnIndexData);
		
		/**
		 * Assemble the serialized result.
		 */
		ScholarExpert se = new ScholarExpert();
		se.setfId(fId);
		se.setnIndexforYear(JSONUtils.c(no));
		se.setIndexTrend(JSONUtils.c(trendList));
		se.setNumforYear(JSONUtils.c(nys0));
		return se;
	}
	
	/**
	 * Rounds a value to two decimal places (DecimalFormat's default HALF_EVEN rounding).
	 * NOTE(review): despite the "4" in the name, the pattern has always produced two
	 * decimals; name kept for compatibility with existing callers.
	 *
	 * @param d value to round
	 * @return the value rounded to two decimal places
	 */
	public static Double formatDouble4(double d) {
		// Pin the symbols to Locale.ROOT: with the no-symbols constructor, locales that
		// use ',' as the decimal separator (e.g. de_DE) make Double.valueOf() throw
		// NumberFormatException on the formatted string.
		DecimalFormat df = new DecimalFormat("#.00",
				java.text.DecimalFormatSymbols.getInstance(java.util.Locale.ROOT));
		return Double.valueOf(df.format(d));
	}
	
	/**
	 * Combines the three component indexes into the n-index as a weighted sum:
	 * n = 0.7 * aIndex + 0.1 * iIndex + 0.2 * pIndex.
	 *
	 * @param aIndex academic index component (already on a 0-100 scale upstream)
	 * @param iIndex innovation index component
	 * @param pIndex p-index component
	 * @return the weighted n-index
	 */
	private static Double computeNindex_(Double aIndex,
			Double iIndex,
			Double pIndex) {
		final double weightedA = aIndex * 0.7;
		final double weightedI = iIndex * 0.1;
		final double weightedP = pIndex * 0.2;
		return weightedA + weightedI + weightedP;
	}
	
	/**
	 * Computes the i-index. Each component is a log-scaled ratio of the expert's
	 * value against the global maximum, capped at 1 when the value exceeds the
	 * maximum, weighted and summed, then scaled by 100:
	 *
	 * iIndex = 100 * (0.3*r(panum) + 0.3*r(pacit) + 0.1*r(efpData)
	 *                 + 0.15*r(eqWorkData) + 0.15*r(eqWork4Data))
	 * where r(v) = log(v + 1) / log(max).
	 *
	 * NOTE: a component can slightly exceed 1 when v == max (log(max+1)/log(max) > 1);
	 * this matches the historical behavior and is deliberately preserved.
	 *
	 * @return the i-index on an (approximately) 0-100 scale
	 */
	private static Double computeIindex(Integer panum,
			Integer maxpaNum,
			Integer pacit,
			Integer maxpatentsCit,
			Integer efpData,
			Integer maxefpData,
			Integer eqWorkData,
			Integer maxeqWorkData,
			Integer eqWork4Data,
			Integer maxeqWork4Data
			) {
		
		Double log1 = iLogRatio(panum, maxpaNum);
		Double log2 = iLogRatio(pacit, maxpatentsCit);
		Double log3 = iLogRatio(efpData, maxefpData);
		Double log4 = iLogRatio(eqWorkData, maxeqWorkData);
		Double log5 = iLogRatio(eqWork4Data, maxeqWork4Data);
		
		return (log1 * 0.3 + log2 * 0.3 + log3 * 0.1 + log4 * 0.15 + log5 * 0.15) * 100;	
	}
	
	/**
	 * Log-scaled ratio for one i-index component.
	 * Guards degenerate maxima (max <= 1), which previously produced NaN or
	 * Infinity because the denominator log(max) is 0 for max == 1 (and -Infinity
	 * for max == 0). Behavior for max > 1 is unchanged.
	 */
	private static double iLogRatio(int value, int max) {
		if (value > max) {
			return 1D;
		}
		if (max <= 1) {
			// value <= max here, so value is 0 or 1: full score iff anything exists.
			return (value > 0) ? 1D : 0D;
		}
		return Math.log(value + 1.0) / Math.log(max);
	}
	
	/**
	 * Computes the a-index for one year. Each component is a log-scaled ratio of
	 * the expert's value against the global maximum, capped at 1 when the value
	 * exceeds the maximum, weighted and summed, then scaled by 100:
	 *
	 * aIndex = 100 * (0.25*r(hindex) + 0.35*r(pnum) + 0.1*r(citations) + 0.3*r(bnum))
	 * where r(v) = log(v + 1) / log(max).
	 *
	 * @return the a-index on an (approximately) 0-100 scale
	 */
	private static Double computeAindex(Integer hindex,
			Integer maxhindex,
			Integer pnum,
			Integer maxpnum,
			Integer citations,
			Integer maxcitations,
			Integer bnum,
			Integer maxbnum) {
		Double log_hIndex = aLogRatio(hindex, maxhindex);
		Double log_pnum = aLogRatio(pnum, maxpnum);
		Double log_citations = aLogRatio(citations, maxcitations);
		Double log_bnum = aLogRatio(bnum, maxbnum);
		return (log_hIndex * 0.25 + log_pnum * 0.35 + log_citations * 0.1 + log_bnum * 0.3) * 100;
	}
	
	/**
	 * Log-scaled ratio for one a-index component.
	 * Guards degenerate maxima (max <= 1), which previously produced NaN or
	 * Infinity because the denominator log(max) is 0 for max == 1 (and -Infinity
	 * for max == 0). Behavior for max > 1 is unchanged.
	 */
	private static double aLogRatio(int value, int max) {
		if (value > max) {
			return 1D;
		}
		if (max <= 1) {
			// value <= max here, so value is 0 or 1: full score iff anything exists.
			return (value > 0) ? 1D : 0D;
		}
		return Math.log(value + 1.0) / Math.log(max);
	}
	
	
	/**
	 * Widens the [minYear, maxYear] range so it covers the years present in the
	 * DTO list. A minYear of 0 means "unset" and is replaced by the earliest year.
	 * NOTE(review): assumes dto is ordered by year (first = earliest, last = latest) —
	 * confirm against the query that produces it.
	 *
	 * @param dto per-year counts, assumed sorted ascending by year
	 * @return a two-element array: {effective min year, effective max year}
	 */
	private static Integer[] computeYear(int minYear,int maxYear,List<NumForYearDTO> dto){
		
		if (dto.isEmpty()) {
			return new Integer[] {minYear, maxYear};
		}
		
		Integer earliest = dto.get(0).getYear();
		Integer latest = dto.get(dto.size() - 1).getYear();
		
		// 0 is the "unset" sentinel; either way, pull minYear down to the earliest year.
		if (minYear == 0 || minYear > earliest) {
			minYear = earliest;
		}
		
		// Push maxYear up to the latest year when necessary.
		if (maxYear < latest) {
			maxYear = latest;
		}
		
		return new Integer[] {minYear, maxYear};
	}
	
	/**
	 * Direction of the most recent change in an index series.
	 *
	 * @param indexArray chronological index values (may be null)
	 * @return 1 if the last value rose versus the one before it, -1 if it fell,
	 *         0 when unchanged, not comparable, or fewer than two data points
	 */
	public Integer trend(List<Double> indexArray) {
		if (indexArray == null || indexArray.size() < 2) {
			return 0;
		}
		double last = indexArray.get(indexArray.size() - 1);
		double previous = indexArray.get(indexArray.size() - 2);
		// Keep the two explicit comparisons (rather than ==) so NaN falls through to 0,
		// matching the original behavior.
		if (last > previous) {
			return 1;
		}
		if (last < previous) {
			return -1;
		}
		return 0;
	}
	

	/**
	 * Returns the index-trend data for an expert, computing and persisting it on
	 * first access (subsequent calls hit the stored row).
	 *
	 * @param expertQ query carrying the expert's fId (required)
	 * @return the expert's stored or freshly computed trend data
	 * @throws MyException when expertQ or its fId is missing
	 */
	public ScholarExpert queryNindex(ExpertQ expertQ) {
		
		if (expertQ == null || expertQ.getfId() == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}
		
		// Check for previously computed trend data first.
		ScholarExpertQ query = new ScholarExpertQ();
		query.setfId(expertQ.getfId());
		ScholarExpert cached = scholarExpertService.queryUnique(query, ScholarExpert.class);
		if (cached != null) {
			return cached;
		}
		
		// Cache miss: compute the indexes and persist them for the next call.
		ScholarExpert computed = computeNindex(expertQ.getfId());
		scholarExpertService.insert(computed);
		return computed;
	}
	
	/**
	 * Loads the stored four-index trend data for an expert.
	 *
	 * @param fId the expert's fId (required)
	 * @return the matching ScholarExpert row, or null when none has been stored yet
	 * @throws MyException when fId is null
	 */
	public ScholarExpert queryIndexTrend(Long fId) {
		if (fId == null) {
			throw new MyException(ResponseCode.PARAMETER_REQUIRED);
		}
		
		ScholarExpertQ query = new ScholarExpertQ();
		query.setfId(fId);
		return scholarExpertService.queryUnique(query, ScholarExpert.class);
	}

}
