package cn.ilovese.collect.serviceimpl.report.similaritystrategy;

import cn.ilovese.collect.datamapper.report.ReportMapper;
import cn.ilovese.collect.datamapper.report.TFIDFSimilarityMapper;
import cn.ilovese.collect.datamapper.task.TaskMapper;
import cn.ilovese.collect.datamapper.user.UserMapper;
import cn.ilovese.collect.datamapper.work.WorkMapper;
import cn.ilovese.collect.po.report.Report;
import cn.ilovese.collect.po.report.TFIDFSimilarity;
import cn.ilovese.collect.service.report.similaritystrategy.TextSimilarityStrategy;
import cn.ilovese.collect.vo.report.ReportFormVO;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.corpus.tag.Nature;
import com.hankcs.hanlp.seg.common.Term;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

@Service
public class TFIDFStrategy implements TextSimilarityStrategy {

    @Resource
    ReportMapper reportMapper;
    @Resource
    TFIDFSimilarityMapper tfidfSimilarityMapper;

    /**
     * Computes the cosine similarity between the TF-IDF vectors of two texts.
     * The IDF part is derived from the per-task word frequencies persisted via
     * {@link #updateTFIDF(ReportFormVO)}, so the score is task-relative.
     *
     * @param taskId task whose report count and word frequencies drive the IDF
     * @param text1  first text to compare
     * @param text2  second text to compare
     * @return cosine similarity; 0.0 when either text yields an all-zero vector
     *         (previously this divided by zero and returned NaN)
     */
    public Double calcTextSimilarityByTFIDF(Integer taskId, String text1, String text2) {
        List<Term> words1 = HanLP.segment(text1);
        List<Term> words2 = HanLP.segment(text2);

        // Assign a stable vector index to every distinct token of either text.
        // putIfAbsent evaluates word2id.size() before insertion, so indices are
        // the sequential values 0,1,2,...
        HashMap<String, Integer> word2id = new HashMap<>();
        for (Term tm : words1) {
            word2id.putIfAbsent(tm.word, word2id.size());
        }
        for (Term tm : words2) {
            word2id.putIfAbsent(tm.word, word2id.size());
        }

        List<Double> tfidf1 = calcTFIDF(words1, taskId, word2id);
        List<Double> tfidf2 = calcTFIDF(words2, taskId, word2id);

        // Cosine similarity of the two vectors. calcTFIDF pre-fills every slot
        // with 0.0, so no per-element null checks are needed.
        double dot = 0.0, norm1 = 0.0, norm2 = 0.0;
        for (int i = 0; i < word2id.size(); i++) {
            double d1 = tfidf1.get(i);
            double d2 = tfidf2.get(i);
            dot += d1 * d2;
            norm1 += d1 * d1;
            norm2 += d2 * d2;
        }
        // Guard against 0/0 -> NaN when a text contained only punctuation or
        // whitespace tokens (its TF-IDF vector is all zeros).
        if (norm1 == 0.0 || norm2 == 0.0) {
            return 0.0;
        }
        return dot / Math.sqrt(norm1 * norm2);
    }

    /**
     * Updates the persisted per-task document frequencies after a new report
     * is inserted: for every distinct word of the report, increments the count
     * of reports (of this task) that contain the word, inserting a row with
     * frequency 1 on first sight.
     *
     * @param reportVO the newly inserted report (description + steps + environment)
     */
    public void updateTFIDF(ReportFormVO reportVO) {
        String src = reportVO.getDescription() + reportVO.getSteps() + reportVO.getEnvironment();
        Integer taskId = reportVO.getTaskid();

        List<Term> words = HanLP.segment(src);
        // Count each distinct word at most once per report (document frequency).
        Set<String> seen = new HashSet<>();
        for (Term tm : words) {
            String word = tm.word;
            if (!seen.add(word)) {
                continue;
            }
            try {
                TFIDFSimilarity row = tfidfSimilarityMapper.selectByWordAndTask(word, taskId);
                if (row != null) {
                    row.setFrequency(row.getFrequency() + 1);
                    tfidfSimilarityMapper.updateByPrimaryKey(row);
                } else {
                    row = new TFIDFSimilarity();
                    row.setTaskId(taskId);
                    row.setFrequency(1);
                    row.setWord(word);
                    tfidfSimilarityMapper.insert(row);
                }
            } catch (Exception e) {
                // Best effort: a failed upsert for one word should not abort the
                // remaining words. NOTE(review): replace with a proper logger
                // (e.g. SLF4J) once one is available in this module.
                e.printStackTrace();
            }
        }
    }

    /**
     * Converts a segmented word list into its TF-IDF vector representation,
     * indexed by {@code word2id}.
     *
     * <p>Fixes over the previous version:
     * <ul>
     *   <li>{@code tm.word.equals(' ')} compared a String to a char literal and
     *       was always false; the intended check is {@code " "}.</li>
     *   <li>Each distinct word now contributes tf*idf exactly once instead of
     *       once per occurrence (which over-weighted repeated words).</li>
     *   <li>A word missing from the frequency table no longer causes a
     *       NullPointerException; it is treated as appearing in one report.</li>
     * </ul>
     *
     * @param words   segmentation result of the text
     * @param taskId  task whose stored frequencies provide the IDF statistics
     * @param word2id word -> vector index mapping; words absent from it are skipped
     * @return a vector of size {@code word2id.size()}, zero-filled for unused slots
     */
    public List<Double> calcTFIDF(List<Term> words, Integer taskId, HashMap<String, Integer> word2id) {
        int reportNum = reportMapper.selectByTaskId(taskId).size();

        // Term counts within this text and the maximum count, used to
        // normalize the term frequency.
        HashMap<String, Integer> counts = new HashMap<>();
        int maxAppear = 1;
        for (Term tm : words) {
            int c = counts.merge(tm.word, 1, Integer::sum);
            maxAppear = Math.max(maxAppear, c);
        }

        List<Double> res = new ArrayList<>(word2id.size());
        for (int i = 0; i < word2id.size(); i++) {
            res.add(0.0);
        }

        Set<String> done = new HashSet<>();
        for (Term tm : words) {
            // Skip punctuation (HanLP nature 'w') and pure-whitespace tokens.
            if (tm.nature.equals(Nature.w) || tm.word.equals(" ") || tm.word.equals("\n")) {
                continue;
            }
            // Each distinct word is scored exactly once.
            if (!done.add(tm.word)) {
                continue;
            }
            Integer index = word2id.get(tm.word);
            if (index == null) {
                continue;
            }
            double tf = counts.get(tm.word) / (double) maxAppear;
            TFIDFSimilarity row = tfidfSimilarityMapper.selectByWordAndTask(tm.word, taskId);
            // The word may not have been indexed by updateTFIDF yet; fall back
            // to a document frequency of 1 instead of dereferencing null
            // (also avoids log-of-division-by-zero).
            int docFreq = (row == null || row.getFrequency() == null || row.getFrequency() == 0)
                    ? 1 : row.getFrequency();
            double idf = Math.log((reportNum + 1) / (double) docFreq);
            res.set(index, tf * idf);
        }
        return res;
    }

    /**
     * Similarity of a new report against all existing reports of a task,
     * currently defined as the maximum pairwise TF-IDF cosine similarity.
     */
    @Override
    public Double CalcSimilarity(ReportFormVO reportVO, Integer taskid) {
        List<Report> reportList = reportMapper.selectByTaskId(taskid);
        String thisText = reportVO.getDescription() + reportVO.getSteps() + reportVO.getEnvironment();
        Double res = 0.0; // interim approach: take the maximum similarity
        for (Report report : reportList) {
            String text = report.getDescription() + report.getSteps() + report.getDeviceInfo();
            res = Math.max(res, calcTextSimilarityByTFIDF(taskid, thisText, text));
        }
        return res;
    }

    /**
     * Pairwise similarity of two stored reports; the first report's task
     * supplies the IDF statistics.
     */
    @Override
    public Double CalSimilarityOneToOne(Report first, Report second) {
        String firstInfo = first.getDescription() + first.getSteps() + first.getDeviceInfo();
        String secondInfo = second.getDescription() + second.getSteps() + second.getDeviceInfo();
        Integer taskId = first.getTaskId();
        return calcTextSimilarityByTFIDF(taskId, firstInfo, secondInfo);
    }

}
