package bm25;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.junit.Test;

/*
 * PRFTermDiscriminator
 *
 * Lucene has no built-in implementation of the Rocchio pseudo relevance
 * feedback method. The idea: run an initial search, take the top K result
 * documents, extract the highest-weighted terms from them, and append those
 * terms to the original query to build a new query for a second retrieval pass.
 *
 * Some sources weight the candidate terms by tf-idf, while others use plain
 * term frequency only — TODO: confirm which variant is intended here
 * (this implementation currently uses frequency-based scores).
 */

/**
 * Rocchio-style pseudo relevance feedback: scores candidate expansion terms
 * from the top (assumed relevant) and bottom (assumed irrelevant) documents of
 * an initial retrieval, and returns the best-scoring terms plus the original
 * query terms as the expanded query.
 */
public class PRFTermDiscriminator {
    // Rocchio hyper-parameters: reward weight for terms from relevant
    // documents (alpha) and penalty weight for terms from irrelevant
    // documents (beta).
    private static double alpha = 0.75;
    private static double beta = 0.15;
    // Number of expansion terms appended to the original query.
    private static final int addTermNum = 20;

    /**
     * Selects up to {@code addTermNum} expansion terms for the given query.
     *
     * <p>The first {@code relevantNum} documents are treated as relevant and
     * their terms are rewarded by {@code alpha / relevantNum}; the last
     * {@code irrelevantNum} documents are treated as irrelevant and their
     * terms (only those already scored) are penalized by
     * {@code beta / irrelevantNum}. Original query terms each start with a
     * base score of 1.0 so they dominate the ranking, and are always appended
     * to the returned list (they may therefore appear twice).
     *
     * @param relevantNum   number of documents from the front treated as relevant
     * @param irrelevantNum number of documents from the back treated as irrelevant
     * @param documents     documents retrieved by the initial search; each must
     *                      carry a stored "path" field pointing at its raw text
     * @param query         the original query string
     * @return expansion terms followed by the original query terms
     * @throws IOException if tokenization fails
     */
    public static List<String> get_top_k_terms(int relevantNum, int irrelevantNum, Document[] documents, String query) throws IOException {
        // Bug fix: clamp both counts so we never index past documents[].
        int relCount = Math.min(relevantNum, documents.length);
        int irrelCount = Math.min(irrelevantNum, documents.length);

        List<String> relevantContent = new ArrayList<>();
        List<String> irrelevantContent = new ArrayList<>();
        int documentsMaxIndex = documents.length - 1;

        for (int i = 0; i < relCount; i++) {
            relevantContent.add(read_file2String(documents[i].get("path")));
        }
        // The "contents" field is too large for Lucene to store directly, so
        // the raw text is re-read from disk via the stored "path" field.
        for (int i = 0; i < irrelCount; i++) {
            irrelevantContent.add(read_file2String(documents[documentsMaxIndex - i].get("path")));
        }

        // Tokenize the query and both document groups.
        String[] querys = splitWord(query);
        String[][] relevants = splitWord(relevantContent);
        String[][] irrelevants = splitWord(irrelevantContent);

        // term -> accumulated Rocchio score
        Map<String, Double> termScoreMap = new HashMap<>();

        // Each original query term gets a full 1.0 per occurrence.
        for (String q : querys) {
            Double cur = termScoreMap.get(q);
            termScoreMap.put(q, (cur == null ? 0.0 : cur) + 1.0);
        }

        // Reward terms that occur in relevant documents.
        for (String[] relevant : relevants) {
            for (String r : relevant) {
                Double cur = termScoreMap.get(r);
                termScoreMap.put(r, (cur == null ? 0.0 : cur) + alpha / relCount);
            }
        }

        // Penalize terms that occur in irrelevant documents — but only terms
        // already in the map; unseen terms are deliberately skipped.
        for (String[] irrelevant : irrelevants) {
            for (String ir : irrelevant) {
                Double cur = termScoreMap.get(ir);
                if (cur == null) {
                    continue;
                }
                termScoreMap.put(ir, cur - beta / irrelCount);
            }
        }

        List<TermTuple> sortedTermTuples = sortedTerm(termScoreMap);
        List<String> strList = new ArrayList<>();
        // Bug fix: never read past the end when fewer than addTermNum distinct
        // terms were collected.
        int limit = Math.min(addTermNum, sortedTermTuples.size());
        for (int i = 0; i < limit; i++) {
            strList.add(sortedTermTuples.get(i).getKey());
        }
        // Original query terms are always kept in the expanded query (their
        // boosted scores mean the top-ranked terms likely include them too).
        strList.addAll(Arrays.asList(querys));
        return strList;
    }

    /**
     * Tokenizes each string in the list.
     *
     * @param contents raw document texts
     * @return one token array per input string, in the same order
     * @throws IOException if tokenization fails
     */
    private static String[][] splitWord(List<String> contents) throws IOException {
        String[][] res = new String[contents.size()][];
        int index = 0;
        for (String content : contents) {
            res[index] = splitWord(content);
            index++;
        }
        return res;
    }

    /**
     * Tokenizes a single string with Lucene's StandardAnalyzer (which also
     * lower-cases and removes stop words).
     *
     * @param content raw text
     * @return the tokens in order of appearance
     * @throws IOException if the analyzer fails
     */
    private static String[] splitWord(String content) throws IOException {
        List<String> tokens = new ArrayList<>();
        // Bug fix: the Analyzer and TokenStream were previously leaked and
        // end() was never called; the TokenStream contract requires
        // reset() -> incrementToken()* -> end() -> close().
        try (Analyzer analyzer = new StandardAnalyzer()) {
            try (TokenStream tokenStream = analyzer.tokenStream("", new StringReader(content))) {
                CharTermAttribute term = tokenStream.getAttribute(CharTermAttribute.class);
                tokenStream.reset();
                while (tokenStream.incrementToken()) {
                    tokens.add(term.toString());
                }
                tokenStream.end();
            }
        }
        return tokens.toArray(new String[tokens.size()]);
    }

    /**
     * Converts the score map into a list of tuples sorted by descending score.
     *
     * @param map term -> score
     * @return tuples in descending score order
     */
    private static List<TermTuple> sortedTerm(Map<String, Double> map) {
        List<TermTuple> termTupleList = new ArrayList<>(map.size());
        for (Map.Entry<String, Double> entry : map.entrySet()) {
            termTupleList.add(new TermTuple(entry.getKey(), entry.getValue()));
        }
        Collections.sort(termTupleList);
        return termTupleList;
    }

    /**
     * A (term, score) pair ordered by descending score.
     */
    public static class TermTuple implements Comparable<TermTuple> {
        private final String key;
        private final double value;

        public TermTuple(String key, double value) {
            this.key = key;
            this.value = value;
        }

        public String getKey() {
            return key;
        }

        public double getValue() {
            return value;
        }

        @Override
        public int compareTo(TermTuple termTuple) {
            // Descending order; Double.compare avoids the deprecated
            // `new Double(...)` boxing of the original implementation.
            return Double.compare(termTuple.value, this.value);
        }

        @Override
        public String toString() {
            return "TermTuple{" +
                    "key='" + key + '\'' +
                    ", value=" + value +
                    '}';
        }
    }

    /**
     * Reads the whole file into a string.
     *
     * <p>Bug fix: the previous implementation called {@code InputStream.read()}
     * once and ignored its return value, so the file could be read only
     * partially; it also used the platform default charset. Files.readAllBytes
     * guarantees a full read, and UTF-8 is now explicit.
     *
     * @param filepath path of the file to read
     * @return the file content, or "" if the file could not be read
     */
    private static String read_file2String(String filepath) {
        try {
            return new String(Files.readAllBytes(Paths.get(filepath)), StandardCharsets.UTF_8);
        } catch (IOException e) {
            e.printStackTrace();
            return "";
        }
    }

    /**
     * Verified by this test: StandardAnalyzer removes stop words.
     */
    @Test
    public void testSplitWord() {
        try {
            String content = "this is my fucking donkey Koulong Liu,it likes zlf, the donkey said so ugly so activate co-attention";
            String[] a = splitWord(content);
            // Bug fix: println(a) printed the array reference, not its content.
            System.out.println(Arrays.toString(a));
            for (String s : a) {
                System.out.print(s);
                System.out.print(" ");
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
