package org.example.searcher;

import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Performs searches against a pre-built document index: tokenizes the query,
 * filters stop words, looks up the inverted index, merges and ranks hits, and
 * builds result entries with a highlighted description snippet.
 */
public class DocSearcher {
    // Path to the stop-word file; chosen per deployment environment (see Config).
    private static String STOP_WORD_PATH = null;

    static {
        if (Config.isOnline) {
            STOP_WORD_PATH = "/home/ubuntu/doc_searcher_index/stop_word.txt";
        } else {
            STOP_WORD_PATH = "D:/doc_searcher_index/stop_word.txt";
        }
    }

    // Stop words loaded from STOP_WORD_PATH; query tokens found here are dropped.
    private HashSet<String> stopWords = new HashSet<>();
    // Forward + inverted index, loaded from disk on construction.
    private Index index = new Index();

    public DocSearcher() {
        index.load();
        // BUG FIX: stop words were never loaded anywhere (main() did not call
        // loadStopWords()), so the stop-word filter was always a no-op.
        // Loading here is idempotent, so an extra explicit call stays harmless.
        loadStopWords();
    }

    /**
     * Runs a full search for {@code query}.
     *
     * @param query raw user query string
     * @return results ordered by descending merged weight
     */
    public List<Result> search(String query) {
        // 1. Tokenize the query and drop stop words.
        //    BUG FIX: the old code seeded `terms` with the complete token list
        //    and then appended the filtered tokens again, so every non-stop
        //    word was counted twice and stop words were never removed. It also
        //    tested `stopWords.contains(term)` with a Term object against a
        //    Set<String>, which could never match — compare by getName().
        List<Term> terms = new ArrayList<>();
        for (Term term : ToAnalysis.parse(query).getTerms()) {
            if (stopWords.contains(term.getName())) {
                continue;
            }
            terms.add(term);
        }
        // 2. Look up the inverted index for each remaining token.
        List<List<Weight>> termResult = new ArrayList<>();
        for (Term term : terms) {
            List<Weight> invertedList = index.getInverted(term.getName());
            if (invertedList != null) {
                termResult.add(invertedList);
            }
        }
        // 3. Merge hit lists so each document appears once (weights summed).
        List<Weight> allTermResult = mergeResult(termResult);
        // 4. Sort by weight, descending. Integer.compare avoids the overflow
        //    risk of the old `o2.getWeight() - o1.getWeight()` subtraction.
        allTermResult.sort((o1, o2) -> Integer.compare(o2.getWeight(), o1.getWeight()));
        // 5. Resolve doc ids against the forward index and build the results.
        List<Result> results = new ArrayList<>();
        for (Weight weight : allTermResult) {
            DocInfo docInfo = index.getDocInfo(weight.getDocId());
            Result result = new Result();
            result.setTitle(docInfo.getTitle());
            result.setUrl(docInfo.getUrl());
            result.setDesc(genDesc(docInfo.getContent(), terms));
            results.add(result);
        }
        return results;
    }

    /** Cursor into the merge source: row = which posting list, col = position in it. */
    static class Pos {
        public int row;
        public int col;

        public Pos(int row, int col) {
            this.row = row;
            this.col = col;
        }
    }

    /**
     * K-way merge of per-token posting lists: each inner list is sorted by
     * docId, then merged via a priority queue of cursors; consecutive hits for
     * the same document are collapsed into one entry with the weights summed.
     *
     * NOTE(review): the collapse step mutates Weight objects that belong to the
     * inverted index itself (setWeight on shared posting entries), so repeated
     * queries may see inflated weights — preserved from the original; confirm
     * whether Weight is copyable and fix at the call site if so.
     */
    private List<Weight> mergeResult(List<List<Weight>> source) {
        // 1. Sort every posting list ascending by docId so equal documents
        //    surface adjacently during the merge.
        for (List<Weight> curRow : source) {
            curRow.sort((o1, o2) -> Integer.compare(o1.getDocId(), o2.getDocId()));
        }
        // 2. Priority queue of cursors, ordered by the docId each points at.
        List<Weight> target = new ArrayList<>();
        PriorityQueue<Pos> queue = new PriorityQueue<>((o1, o2) ->
                Integer.compare(source.get(o1.row).get(o1.col).getDocId(),
                                source.get(o2.row).get(o2.col).getDocId()));
        // 3. Seed with the head of every posting list.
        //    BUG FIX: guard against empty lists — the old code offered a cursor
        //    unconditionally, and an empty row would throw
        //    IndexOutOfBoundsException inside the queue comparator.
        for (int row = 0; row < source.size(); row++) {
            if (!source.get(row).isEmpty()) {
                queue.offer(new Pos(row, 0));
            }
        }
        // 4. Repeatedly take the smallest-docId cursor.
        while (!queue.isEmpty()) {
            Pos minPos = queue.poll();
            Weight curWeight = source.get(minPos.row).get(minPos.col);
            if (!target.isEmpty()
                    && target.get(target.size() - 1).getDocId() == curWeight.getDocId()) {
                // Same document hit by multiple tokens: accumulate the weight.
                Weight lastWeight = target.get(target.size() - 1);
                lastWeight.setWeight(lastWeight.getWeight() + curWeight.getWeight());
            } else {
                target.add(curWeight);
            }
            // Advance this cursor; re-offer only if the row has more entries.
            if (minPos.col + 1 < source.get(minPos.row).size()) {
                queue.offer(new Pos(minPos.row, minPos.col + 1));
            }
        }
        return target;
    }

    /**
     * Builds a ~160-char description snippet centered on the first query term
     * found in the document body, wrapping matched terms in italic tags so the
     * front end can highlight them.
     *
     * BUG FIX: the highlight used to emit "&lt;i&gt;word&lt;i&gt;" — the closing tag was
     * a second opening tag; it now emits a proper "&lt;/i&gt;".
     */
    private String genDesc(String content, List<Term> terms) {
        // Lower-case once up front (the original re-lowercased on every loop
        // iteration) so matching is case-insensitive throughout.
        content = content.toLowerCase();
        int firstPos = -1;
        for (Term term : terms) {
            String word = term.getName();
            // Pad whole-word matches with spaces so punctuation adjacent to the
            // word (e.g. "word)" or "(word") does not hide a match.
            // BUG FIX: Pattern.quote / Matcher.quoteReplacement guard against
            // terms containing regex or replacement metacharacters, which would
            // previously throw or corrupt the text.
            content = content.replaceAll("\\b" + Pattern.quote(word) + "\\b",
                    Matcher.quoteReplacement(" " + word + " "));
            firstPos = content.indexOf(" " + word + " ");
            if (firstPos >= 0) {
                break;
            }
        }
        if (firstPos == -1) {
            // No query term in the body: fall back to the leading 160 chars.
            if (content.length() > 160) {
                return content.substring(0, 160) + "...";
            }
            return content;
        }
        // Window: up to 60 chars of left context, 160 chars total.
        int descBeg = firstPos < 60 ? 0 : firstPos - 60;
        String desc;
        if (descBeg + 160 > content.length()) {
            desc = content.substring(descBeg);
        } else {
            desc = content.substring(descBeg, descBeg + 160);
        }
        // Wrap each matched term (whole word, case-insensitive) for highlighting.
        for (Term term : terms) {
            String word = term.getName();
            desc = desc.replaceAll("(?i) " + Pattern.quote(word) + " ",
                    Matcher.quoteReplacement("<i> " + word + " </i>"));
        }
        return desc;
    }

    /**
     * Loads stop words (one per line) from {@code STOP_WORD_PATH} into
     * {@code stopWords}. Reads as UTF-8 explicitly — the old FileReader used
     * the platform default charset, which breaks non-ASCII stop words on
     * mismatched systems.
     *
     * @throws RuntimeException wrapping any IOException, with the path in the message
     */
    public void loadStopWords() {
        try (BufferedReader bufferedReader = new BufferedReader(
                new InputStreamReader(new FileInputStream(STOP_WORD_PATH), StandardCharsets.UTF_8))) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {
                stopWords.add(line);
            }
        } catch (IOException e) {
            throw new RuntimeException("failed to load stop words from " + STOP_WORD_PATH, e);
        }
    }

    /** Simple interactive REPL for manual testing. */
    public static void main(String[] args) {
        DocSearcher docSearcher = new DocSearcher();
        Scanner scanner = new Scanner(System.in);
        while (true) {
            System.out.println("->");
            // BUG FIX: next() split multi-word queries at whitespace and threw
            // NoSuchElementException on EOF; read whole lines and exit cleanly.
            if (!scanner.hasNextLine()) {
                break;
            }
            String query = scanner.nextLine();
            List<Result> results = docSearcher.search(query);
            for (Result result : results) {
                System.out.println("==================");
                System.out.println(result);
            }
        }
    }

}
