package com.paper.check.algorithm.core;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.paper.check.search.core.exexutor.DefaultServiceExecutor;
import org.paper.check.search.core.exexutor.ServiceExecutor;
import org.paper.check.search.core.model.EdismaxEntry;
import org.paper.check.search.core.model.SearchMapParam;
import org.paper.check.search.core.model.SearchResponse;
import org.paper.check.search.core.model.SearchTypeEnum;
import org.paper.check.search.core.solr.SolrSearchService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.wltea.analyzer.model.Lexeme;

import com.alibaba.dubbo.common.utils.CollectionUtils;
import com.paper.check.algorithm.model.CopyWordsModel;
import com.paper.check.algorithm.model.PaperCheckReoprtModel;
import com.paper.check.algorithm.model.SearchPaperIndexVo;
import com.paper.check.analyzer.service.TextService;
import com.paper.check.analyzer.service.TextServiceImpl;
import com.paper.check.framework.utils.ContentSpltUtils;

@Service
public class CheckAlgorithmCore {

    /** Similarity threshold: both directions must reach this overlap ratio to count as a copy. */
    private static final float COPY_THRESHOLD = 0.6f;

    // Tokenizer used to split sentences into lexemes before comparison.
    // NOTE(review): instantiated directly rather than @Autowired — kept as-is to
    // preserve behavior, but consider letting Spring inject it.
    TextService textService = new TextServiceImpl();

    @Autowired
    SolrSearchService searchService;
    @Autowired
    DefaultServiceExecutor defaultServiceExecutor;
    @Value("${coreName:trade}")
    private String coreName;

    /**
     * Runs a plagiarism check over a whole document.
     * <p>
     * The document is split into sentences on punctuation; each sentence is
     * tokenized, candidate matches are recalled from Solr, and a bidirectional
     * token-overlap ratio decides whether the sentence counts as copied. The
     * first candidate reaching the threshold in both directions wins.
     *
     * @param sourceContent the full document text to check
     * @return a report holding the matched sentence pairs, sentence counts, and
     *         the overall copy ratio (0 when the document has no sentences)
     */
    public PaperCheckReoprtModel paperCheck(String sourceContent) {
        PaperCheckReoprtModel paperCheckReoprtModel = new PaperCheckReoprtModel();
        // Split the document into sentences on punctuation marks.
        List<String> strings = ContentSpltUtils.splitBySymbol(sourceContent);

        List<CopyWordsModel> copyWordsModels = new ArrayList<CopyWordsModel>();
        for (String string : strings) {
            // Tokenize the source sentence once; reused for every candidate below.
            List<Lexeme> lexemes = textService.analyzer(string);
            // Recall similar sentences from Solr using the tokens as an OR query.
            List<SearchPaperIndexVo> paperIndexVos = searchText(lexemes);
            if (CollectionUtils.isEmpty(paperIndexVos)) {
                continue;
            }

            // Hoisted out of the candidate loop: the source token list does not
            // change per candidate (previously recomputed twice per iteration).
            List<String> sourceTokens = LexemeToString(lexemes);
            for (SearchPaperIndexVo paperIndexVo : paperIndexVos) {
                String candidate = paperIndexVo.getContent();
                List<String> candidateTokens = LexemeToString(textService.analyzer(candidate));
                // Compare in both directions so that one sentence merely being a
                // small subset of the other does not count as a match.
                float rap = CalCopyRap(sourceTokens, candidateTokens);
                float reverseRap = CalCopyRap(candidateTokens, sourceTokens);
                if (rap >= COPY_THRESHOLD && reverseRap >= COPY_THRESHOLD) {
                    CopyWordsModel copyWordsModel = new CopyWordsModel();
                    copyWordsModel.setCopyRap(rap);
                    copyWordsModel.setCopyWords(candidate);
                    copyWordsModel.setSourceWords(string);
                    copyWordsModel.setMysqlId(paperIndexVo.getMysqlId());
                    copyWordsModel.setMysqlTableName(paperIndexVo.getMysqlTableName());
                    copyWordsModels.add(copyWordsModel);
                    break; // first sufficiently similar candidate wins
                }
            }
        }

        // Build the report.
        int totalNums = strings.size();
        paperCheckReoprtModel.setListCopys(copyWordsModels);
        paperCheckReoprtModel.setTotalNums(totalNums);                 // total sentences after splitting
        paperCheckReoprtModel.setCopyNums(copyWordsModels.size());     // sentences flagged as copied
        // Guard against division by zero on an empty document, which previously
        // produced NaN for the overall copy ratio.
        float rap = totalNums == 0 ? 0f : (float) copyWordsModels.size() / (float) totalNums;
        paperCheckReoprtModel.setCopyRap(rap);                         // whole-document copy ratio
        return paperCheckReoprtModel;
    }

    /**
     * Recalls candidate sentences from Solr for the given tokens.
     * <p>
     * The token texts are joined with single spaces into one query string sent
     * as an OR query against the {@code content} field, with an edismax
     * minimum-match of 60% of the terms.
     *
     * @param lexemes the tokens of the source sentence
     * @return the matching index records (may be empty)
     */
    public List<SearchPaperIndexVo> searchText(List<Lexeme> lexemes) {
        SearchMapParam searchParam = new SearchMapParam();
        searchParam.setServiceType(SearchMapParam.ServiceEnum.PAPER_CHECK);
        searchParam.setQueryTextSearchType(SearchTypeEnum.OR);

        // Join token texts with single spaces. StringBuilder replaces the old
        // StringBuffer (no shared mutation here), and the separator check is
        // length() > 0 — the previous `toString().length() > 1` both allocated a
        // String per iteration and skipped the separator after a one-character
        // first token, fusing two terms into one.
        StringBuilder queryText = new StringBuilder();
        for (Lexeme lexeme : lexemes) {
            if (queryText.length() > 0) {
                queryText.append(" ");
            }
            queryText.append(lexeme.getLexemeText());
        }
        Map<String, String> queryMap = new HashMap<String, String>();
        queryMap.put("content", queryText.toString());
        searchParam.setQueryMap(queryMap);

        // edismax "mm": require at least 60% of the query terms to match.
        EdismaxEntry edismaxEntry = new EdismaxEntry();
        edismaxEntry.setMm("60%");
        searchParam.setEdismaxEntry(edismaxEntry);

        SearchResponse result = search(searchParam);
        return SearchRes2IndexUtils.search2IndexVo(result);
    }

    /**
     * Executes a Solr search for the given parameters.
     *
     * @param searchParam the search request; may be {@code null}
     * @return the formatted search response, or {@code null} when
     *         {@code searchParam} is {@code null}
     */
    public SearchResponse search(SearchMapParam searchParam) {
        if (searchParam == null) {
            return null;
        }
        ServiceExecutor executor = defaultServiceExecutor;

        // Parse the parameters into a Solr query (executor default behavior).
        SolrQuery query = executor.parseParam(searchParam);

        // Use a local variable rather than mutating the @Value-injected field:
        // this bean is a Spring singleton, so the old field write was a
        // thread-safety hazard and permanently overrode the configured core
        // name for all subsequent callers.
        String core = searchParam.getServiceType() != null ? "paper" : coreName;

        // Execute the query against the selected core.
        QueryResponse queryRsp = executor.search(core, query);

        // Format the raw Solr response into the service-level response model.
        return executor.formatResponse(queryRsp);
    }

    /**
     * Extracts the text of each lexeme into a plain string list.
     *
     * @param lexemes the tokens to convert; may be {@code null} or empty
     * @return the token texts in order, or {@code null} for null/empty input
     *         (kept for backward compatibility — callers in this class handle
     *         the {@code null} via {@link #CalCopyRap})
     */
    public List<String> LexemeToString(List<Lexeme> lexemes) {
        if (lexemes == null || lexemes.isEmpty()) {
            return null;
        }
        List<String> lists = new ArrayList<String>(lexemes.size());
        for (Lexeme lexeme : lexemes) {
            lists.add(lexeme.getLexemeText());
        }
        return lists;
    }

    /**
     * Simple greedy token-overlap ratio: the fraction of source tokens that can
     * be matched one-to-one against target tokens. A {@code mark} array ensures
     * each target token is consumed at most once, so duplicate tokens are not
     * double-counted.
     *
     * @param sourceAnalyzer source-side tokens; may be {@code null} or empty
     * @param targetAnalyzer target-side tokens; may be {@code null} or empty
     * @return overlap ratio in [0, 1]; 0 when either list is null or empty
     */
    public float CalCopyRap(List<String> sourceAnalyzer, List<String> targetAnalyzer) {
        float rap = 0f;
        if (sourceAnalyzer == null || sourceAnalyzer.isEmpty()) {
            return rap;
        }
        if (targetAnalyzer == null || targetAnalyzer.isEmpty()) {
            return rap;
        }
        boolean[] mark = new boolean[targetAnalyzer.size()];
        int copyCount = 0;
        for (int j = 0, sourceLen = sourceAnalyzer.size(); j < sourceLen; j++) {
            for (int i = 0, tarLen = targetAnalyzer.size(); i < tarLen; i++) {
                if (!mark[i] && sourceAnalyzer.get(j).equals(targetAnalyzer.get(i))) {
                    mark[i] = true; // consume this target token
                    copyCount++;
                    break;
                }
            }
        }
        rap = (float) copyCount / (float) sourceAnalyzer.size();
        return rap;
    }
}
