package get_score;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import static org.apache.spark.sql.functions.*;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.Encoder;

import scala.Tuple2;

/**
 * Batch job: scores documents against each topic with BM25 over a
 * pre-built inverted-index table, keeps the top-10 documents per topic,
 * and reports the average NDCG@10 across all topics using the qrel
 * relevance judgments.
 */
public class vid_titles_DCG {
    public static void main(String[] args) {
        // Local Spark session; quiet the log output.
        SparkSession spark = SparkSession.builder()
                .appName("SentenceScorer")
                .master("local[*]")
                .getOrCreate();
        spark.sparkContext().setLogLevel("ERROR");

        // Inverted-index table: rows carry at least (word, doc_id, count,
        // doc_len, distinct_doc_id) — per-term/per-doc stats for BM25.
        // Use the public json() reader instead of the internal
        // JsonFileFormat class name (unstable, non-API).
        Dataset<Row> index_table = spark.read()
                .json("/root/autodl-tmp/MR/bio_ir/out4_json")
                .cache();
        // qrel relevance judgments: (topic_id, doc_id, relevancy).
        Dataset<Row> qrel_table = spark.read()
                .json("/root/autodl-tmp/MR/bio_ir/data/qrel.json")
                .cache();
        // Topics: (topic_id, topic_content).
        Dataset<Row> topic_tables = spark.read()
                .json("/root/autodl-tmp/MR/bio_ir/data/vid_titles.json");

        List<Row> topic_rows = topic_tables.collectAsList();

        // BM25 parameters. TOTAL_WORD is used as the length-normalization
        // denominator — presumably the average document length; TODO confirm.
        // TOTAL_PAPER is the corpus size N for the IDF term.
        final double K1 = 1.5, B = 0.5;
        final long TOTAL_WORD = 310000;
        final long TOTAL_PAPER = 3633;

        int total_topic = 0;
        double total_NDCG = 0.0;

        for (Row topic_row : topic_rows) {
            total_topic++;

            String topic_id = topic_row.getString(topic_row.fieldIndex("topic_id"));
            String topic_content = topic_row.getString(topic_row.fieldIndex("topic_content"));
            // Tokenize the query on whitespace.
            String[] words = topic_content.split("\\s+");
            List<String> wordList = new ArrayList<>(Arrays.asList(words));

            // Per-posting BM25 contribution:
            //   log((N - df + 0.5)/(df + 0.5) + 1)
            //     * tf*(K1+1) / (tf + K1*(1 - B + B*docLen/avgLen))
            Dataset<Row> scoredData = index_table
                    .filter(col("word").isin(wordList.toArray()))
                    .withColumn("score", log(
                            (lit(TOTAL_PAPER).$minus(col("distinct_doc_id")).plus(0.5))
                                    .$div(col("distinct_doc_id").plus(0.5))
                                    .plus(1)
                            ).multiply(
                                    (col("count").multiply(lit(K1 + 1))).$div(
                                            col("count").plus(lit(K1).multiply(
                                                    lit(1 - B).plus(lit(B).multiply(col("doc_len").$div(lit(TOTAL_WORD))))
                                            ))
                                    )
                            )
                    )
                    .select("doc_id", "score");

            // Sum the per-term contributions into a document score.
            Dataset<Row> aggregatedScores = scoredData
                    .groupBy("doc_id")
                    .agg(sum("score").alias("total_score"));

            // Rank documents by score via a window function and keep the top 10.
            final int NUM = 10;
            WindowSpec windowSpec = Window.orderBy(functions.desc("total_score"));
            Dataset<Row> rankedScores = aggregatedScores
                    .withColumn("rank", functions.row_number().over(windowSpec))
                    .filter(functions.col("rank").leq(NUM));

            // Compute DCG@10, looking up each ranked doc's relevance in qrel.
            List<Row> rankedScoresList = rankedScores.collectAsList();
            double DCG = 0.0;
            List<Integer> relevancy_list = new ArrayList<>();
            for (Row row : rankedScoresList) {
                int rank = row.getInt(row.fieldIndex("rank"));
                String doc_id = row.getString(row.fieldIndex("doc_id"));
                Dataset<Row> dcg_table = qrel_table
                        .filter(col("doc_id").equalTo(doc_id))
                        .filter(col("topic_id").equalTo(topic_id));
                // No judgment for this (topic, doc) pair => relevance 0.
                if (dcg_table.isEmpty()) {
                    relevancy_list.add(0);
                    continue;
                }
                Row firstRow = dcg_table.first();
                // BUG FIX: read relevancy from the qrel row (firstRow), not
                // from the ranked-scores row, which has no such column.
                int relevancy = (int) firstRow.getDouble(firstRow.fieldIndex("relevancy"));
                relevancy_list.add(relevancy);
                DCG += relevancy / (Math.log(rank + 1) / Math.log(2.0));
            }

            // IDCG: the same gains in ideal (descending) order.
            Collections.sort(relevancy_list, Collections.reverseOrder());
            double IDCG = 0.0;
            int i = 1;
            for (Integer relevancy : relevancy_list) {
                IDCG += relevancy / (Math.log(i + 1) / Math.log(2.0));
                i++;
            }

            // Guard: when no retrieved doc is relevant, IDCG == 0 and the
            // division would produce NaN and poison the running average.
            // Convention: NDCG = 0 for such topics.
            double NDCG = (IDCG == 0.0) ? 0.0 : DCG / IDCG;
            total_NDCG += NDCG;
        }

        // Average over all topics (guard against an empty topic file).
        double avg_NDCG = (total_topic == 0) ? 0.0 : total_NDCG / total_topic;
        System.out.println("Average NDCG: " + avg_NDCG);
        spark.stop();
    }

}
