package com.cl.spark.node;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Component;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Spark node that deduplicates rows within groups, keeping only the top-N rows
 * of each group according to a sort order (e.g. "keep the latest record per key").
 *
 * <p>Node expression schema (read from {@code SparkParam#getNodeExpression()}):
 * <ul>
 *   <li>{@code groupByFields} — JSON array of column names defining the group key</li>
 *   <li>{@code sortFields}    — JSON array of column names defining the ordering</li>
 *   <li>{@code sortType}      — "desc" for descending, anything else means ascending</li>
 *   <li>{@code reserveCount}  — how many rows to keep per group (defaults to 1 when absent)</li>
 * </ul>
 */
@Component
public class GroupDropDuplicatesNode extends BaseSparkNode {

    @Override
    public SparkResult process(SparkParam sparkParam) {
        // This node operates on the first (and only expected) upstream dataset.
        Dataset<Row> dataset = sparkParam.getSparkResultList().get(0).getDataset();
        JSONObject expression = sparkParam.getNodeExpression();

        JSONArray groupByFields = expression.getJSONArray("groupByFields");
        JSONArray sortFields = expression.getJSONArray("sortFields");
        String sortType = expression.getString("sortType");
        // May be null if the key is absent from the expression; handled downstream.
        Integer reserveCount = expression.getInteger("reserveCount");

        List<String> groupByFieldsList = toStringList(groupByFields);
        List<String> sortFieldsList = toStringList(sortFields);

        dataset = deduplicateData(dataset, groupByFieldsList, sortFieldsList, sortType, reserveCount);
        return SparkResult.success(dataset);
    }

    /** Converts a fastjson array of strings into a {@code List<String>}; tolerates null input. */
    private static List<String> toStringList(JSONArray array) {
        List<String> list = new ArrayList<>();
        if (array != null) {
            for (int i = 0; i < array.size(); i++) {
                list.add(array.getString(i));
            }
        }
        return list;
    }

    /**
     * Keeps at most {@code reserveCount} rows per group, ranked by the given sort fields.
     *
     * <p>Fix: the previous implementation ordered the window by {@code sortFields.get(0)}
     * only, silently ignoring any additional sort fields; all fields now participate in
     * the ordering. A null {@code reserveCount} now defaults to 1 instead of producing a
     * null comparison that filters out every row.
     *
     * @param dataset       input dataset
     * @param groupByFields columns forming the partition (group) key
     * @param sortFields    columns defining the intra-group ordering (must be non-empty)
     * @param sortType      "desc" (case-insensitive) for descending; otherwise ascending
     * @param reserveCount  number of rows to keep per group; null means 1
     * @return dataset containing only the top {@code reserveCount} rows of each group
     */
    public Dataset<Row> deduplicateData(Dataset<Row> dataset, List<String> groupByFields, List<String> sortFields, String sortType, Integer reserveCount) {
        Column[] partitionColumns = groupByFields.stream()
                .map(functions::col)
                .toArray(Column[]::new);

        // Apply the same direction to every sort field; previously only the first was used.
        boolean descending = "desc".equalsIgnoreCase(sortType);
        Column[] sortColumns = sortFields.stream()
                .map(field -> descending ? functions.col(field).desc() : functions.col(field).asc())
                .toArray(Column[]::new);

        WindowSpec windowSpec = Window.partitionBy(partitionColumns)
                .orderBy(sortColumns);

        // Default to keeping a single (latest) row per group when no count is configured.
        int keep = (reserveCount == null) ? 1 : reserveCount;

        // Rank rows within each group, keep ranks <= keep, then drop the helper column.
        Dataset<Row> withRowNumber = dataset.withColumn("row_number", functions.row_number().over(windowSpec));
        return withRowNumber.filter(functions.col("row_number").leq(keep)).drop("row_number");
    }

    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.GROUP_DROP_DUPLICATES;
    }

}
