package com.cl.spark.node;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Component;
import scala.Tuple2;
import scala.annotation.meta.param;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

import static org.apache.spark.sql.functions.col;

@Component
public class MatchNode extends BaseSparkNode {

    /**
     * Matching node: inner-joins two upstream datasets on pairwise field equality
     * ({@code fields1[i] == fields2[i]}) and appends a boolean {@code MATCH} column
     * whose polarity is driven by the configured output type ("匹配" = true on match,
     * "不匹配" = false on match).
     *
     * @param sparkParam carries the two input datasets (index 0 and 1) and the node
     *                   expression JSON (keys: {@code fields1}, {@code fields2},
     *                   {@code outputType})
     * @return a successful {@link SparkResult} wrapping the joined, flagged dataset
     * @throws IllegalArgumentException if {@code outputType} is unrecognized or the
     *                                  two field lists differ in length
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {
        List<Dataset<Row>> datasetList = sparkParam.getDatasetList();
        JSONObject expression = sparkParam.getNodeExpression();

        Dataset<Row> data1DF = datasetList.get(0);
        Dataset<Row> data2DF = datasetList.get(1);

        List<String> fields1 = expression.getJSONArray("fields1").toJavaList(String.class);
        List<String> fields2 = expression.getJSONArray("fields2").toJavaList(String.class);
        if (fields1.size() != fields2.size()) {
            // The pairwise equality below indexes both lists in lockstep; fail fast
            // with context instead of throwing IndexOutOfBoundsException mid-loop.
            throw new IllegalArgumentException(
                    "fields1/fields2 size mismatch: " + fields1.size() + " vs " + fields2.size());
        }

        // BUG FIX: the original left `output` as a null Boolean for any other
        // outputType, which caused an NPE on auto-unboxing at `!output` below.
        // Validate eagerly and keep the flag as a primitive.
        String outputType = expression.getString("outputType");
        final boolean output;
        if ("匹配".equals(outputType)) {
            output = true;
        } else if ("不匹配".equals(outputType)) {
            output = false;
        } else {
            throw new IllegalArgumentException("Unsupported outputType: " + outputType);
        }

        // Pairwise join condition: df1.fields1[i] == df2.fields2[i], AND-ed together.
        List<Column> columnList = new ArrayList<>(fields1.size());
        for (int i = 0; i < fields1.size(); i++) {
            columnList.add(data1DF.col(fields1.get(i)).equalTo(data2DF.col(fields2.get(i))));
        }

        // NOTE(review): Spark SQL does not support distinct aggregates as window
        // functions, so countDistinct(...).over(...) is expected to raise an
        // AnalysisException on stock Spark — verify this path is actually exercised;
        // size(collect_set(...)) over the same window is the usual replacement.
        Dataset<Row> joinedDF = data1DF.as("df1").joinWith(data2DF.as("df2"),
                        columnList.stream().reduce(Column::and).orElse(functions.lit(true)), // empty fields1 => always-true join condition
                        "inner")
                .withColumn("MATCH", functions.when(
                        functions.countDistinct("df2.*").over(Window.partitionBy(fields1.stream()
                                .map(data1DF::col)
                                .toArray(Column[]::new))).equalTo(fields2.size()), output).otherwise(!output)
                );

        return SparkResult.success(joinedDF);
    }

    /**
     * @return the node type this component handles ({@code MATCH}), used by the
     *         framework to dispatch to this node
     */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.MATCH;
    }
}
