package com.example.lzh.component;

import com.example.lzh.component.Index;
import com.example.lzh.model.DocInfo;
import com.example.lzh.model.Result;
import com.example.lzh.model.Weight;

import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.RequestMapping;

import java.awt.image.BufferedImage;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.regex.Pattern;

/**
 * Search service built on top of the inverted index.
 *
 * <p>On construction it loads the index and the stop-word list, then serves
 * forward lookups ({@link #searchById}) and ranked inverted-index queries
 * ({@link #searchInverted}).
 *
 * @since 2023/1/28
 */
@Component
public class DocSearch {
    // Shared, loaded once in the constructor; all queries go through this instance.
    private final Index index = new Index();
    private static final String STOP_WORD_PATH = "/home/ur-search/stop_words.txt";
    private final HashSet<String> stopWords = new HashSet<>();

    public DocSearch() throws InterruptedException {
        index.load();
        loadStopWords();
    }

    /**
     * Forward-index lookup: returns the document info for the given doc id.
     *
     * BUG FIX: previously constructed a brand-new {@code Index} here without
     * calling {@code load()}, so the lookup ran against an unloaded index.
     * Reuse the loaded field instead.
     */
    public DocInfo searchById(int id) throws InterruptedException {
        return index.getDocInfo(id);
    }

    /**
     * Inverted-index search: tokenizes the query, filters stop words, merges
     * per-term posting lists (combining weights for the same document) and
     * returns results ranked by merged weight, highest first.
     *
     * @param str raw query string
     * @return ranked list of results (title, url, highlighted description)
     */
    public List<Result> searchInverted(String str) throws InterruptedException {
        // 1. Tokenize the query and drop stop words.
        List<Term> terms = new LinkedList<>();
        for (Term term : ToAnalysis.parse(str).getTerms()) {
            if (!stopWords.contains(term.getName())) {
                terms.add(term);
            }
        }
        // 2. Fetch the posting list for every remaining term.
        List<List<Weight>> list = new LinkedList<>();
        for (Term term : terms) {
            List<Weight> one = index.getInverted(term.getName());
            if (one != null) {
                list.add(one);
            }
        }
        // 3. Merge hits for the same document across terms, summing weights.
        List<Weight> allTermResult = mergeResult(list);
        // 4. Rank by merged weight, highest first (the per-list weight sort in the
        //    old code showed this intent but the ranking was never applied).
        allTermResult.sort((w1, w2) -> Integer.compare(w2.getWeight(), w1.getWeight()));
        // 5. Resolve each hit to its document info and build the response.
        List<Result> results = new LinkedList<>();
        for (Weight weight : allTermResult) {
            DocInfo docInfo = index.getDocInfo(weight.getDocId());
            Result result = new Result();
            result.setTitle(docInfo.getTitle());
            result.setUrl(docInfo.getUrl());
            result.setDesc(genDesc(docInfo.getContent(), terms));
            results.add(result);
        }
        return results;
    }

    /**
     * Cursor into the 2-D posting-list structure used by the k-way merge:
     * {@code row} selects a posting list, {@code col} the position within it.
     * Static nested (the original non-static inner class carried a useless
     * reference to the enclosing {@code DocSearch}).
     */
    public static class Pos {
        public int row;
        public int col;

        public Pos(int row, int col) {
            this.row = row;
            this.col = col;
        }
    }

    /**
     * K-way-merges the per-term posting lists; hits on the same document are
     * collapsed into one entry whose weight is the sum of the individual weights.
     */
    private List<Weight> mergeResult(List<List<Weight>> list) {
        // Each list must be sorted by docId ascending for the merge below to put
        // duplicate docIds next to each other.
        // BUG FIX: the old code sorted by weight DESCENDING, which broke that
        // invariant, so duplicate documents were almost never combined.
        // Sort defensive copies so the index's own posting lists are not reordered.
        final List<List<Weight>> sorted = new ArrayList<>(list.size());
        for (List<Weight> weightList : list) {
            List<Weight> copy = new ArrayList<>(weightList);
            // Integer.compare avoids the overflow risk of "a - b" comparators.
            copy.sort((w1, w2) -> Integer.compare(w1.getDocId(), w2.getDocId()));
            sorted.add(copy);
        }
        List<Weight> result = new LinkedList<>();
        // Min-heap keyed by the docId each cursor currently points at.
        PriorityQueue<Pos> queue = new PriorityQueue<>((o1, o2) -> {
            Weight w1 = sorted.get(o1.row).get(o1.col);
            Weight w2 = sorted.get(o2.row).get(o2.col);
            return Integer.compare(w1.getDocId(), w2.getDocId());
        });
        for (int row = 0; row < sorted.size(); row++) {
            // Guard: the old code offered a cursor even for an empty list, which
            // made the comparator throw IndexOutOfBoundsException.
            if (!sorted.get(row).isEmpty()) {
                queue.offer(new Pos(row, 0));
            }
        }
        while (!queue.isEmpty()) {
            Pos minPos = queue.poll();
            Weight curWeight = sorted.get(minPos.row).get(minPos.col);
            if (!result.isEmpty()
                    && result.get(result.size() - 1).getDocId() == curWeight.getDocId()) {
                Weight lastWeight = result.get(result.size() - 1);
                // BUG FIX: the old code summed getDocId() values instead of weights.
                // NOTE(review): setWeight mutates a Weight object owned by the
                // index's posting list, so merged weights leak into later queries —
                // consider deep-copying Weight here once its constructor is known.
                lastWeight.setWeight(lastWeight.getWeight() + curWeight.getWeight());
            } else {
                result.add(curWeight);
            }
            // Advance this list's cursor; re-offer only while in bounds.
            Pos next = new Pos(minPos.row, minPos.col + 1);
            if (next.col < sorted.get(next.row).size()) {
                queue.offer(next);
            }
        }
        return result;
    }

    /**
     * Builds a short description snippet around the first query word found in
     * the content, with every query word wrapped in {@code <i>...</i>}.
     * Falls back to a truncated prefix when no word matches.
     * (Renamed from {@code GenDesc} to follow lowerCamelCase; private, only
     * called from this class.)
     */
    private String genDesc(String content, List<Term> terms) {
        // Lowercase once up front (the old code re-lowercased on every iteration).
        content = content.toLowerCase();
        int firstPos = -1;
        for (Term term : terms) {
            String word = term.getName().toLowerCase();
            // Whole-word match: pad hits with spaces so they can be located and
            // later highlighted as " word ".
            // BUG FIX: Pattern.quote prevents a PatternSyntaxException when the
            // query contains regex metacharacters (e.g. "c++", "a.b").
            content = content.replaceAll("\\b" + Pattern.quote(word) + "\\b", " " + word + " ");
            firstPos = content.indexOf(" " + word + " ");
            // BUG FIX: was "> 0", which missed a match at position 0.
            if (firstPos >= 0) {
                break;
            }
        }
        if (firstPos == -1) {
            // No query word present — return a plain truncated prefix.
            return content.length() > 160 ? content.substring(0, 160) + "..." : content;
        }
        // Window of up to 160 chars starting ~60 chars before the first hit.
        int descBeg = firstPos < 60 ? 0 : firstPos - 60;
        String desc;
        if (descBeg + 160 > content.length()) {
            desc = content.substring(descBeg);
        } else {
            desc = content.substring(descBeg, descBeg + 160) + "...";
        }
        // Highlight every query word in the snippet (case-insensitive).
        for (Term term : terms) {
            String word = term.getName().toLowerCase();
            desc = desc.replaceAll("(?i) " + Pattern.quote(word) + " ", "<i> " + word + " </i>");
        }
        return desc;
    }

    /**
     * Loads the stop-word list, one word per line, into {@link #stopWords}.
     * I/O failure is best-effort: search still works without stop-word filtering.
     */
    public void loadStopWords() {
        // BUG FIX: FileReader used the platform default charset; read UTF-8
        // explicitly so non-ASCII stop words are not garbled on some systems.
        try (BufferedReader bufferedReader = new BufferedReader(
                new InputStreamReader(new FileInputStream(STOP_WORD_PATH), StandardCharsets.UTF_8))) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {
                stopWords.add(line);
            }
        } catch (IOException e) {
            // TODO(review): switch to a real logger; printStackTrace loses context.
            e.printStackTrace();
        }
    }
}
