package com.cl.spark.node;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Component;
import scala.annotation.meta.param;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.lit;

@Component
public class UnionTableNode extends BaseSparkNode {

    /**
     * Merges the first two datasets of the parameter into one dataset via a
     * union by column name.
     *
     * <p>Schemas are aligned first: any column present in only one dataset is
     * added to the other as a null literal, cast to the original column's
     * {@code DataType} so the merged schema keeps concrete types instead of
     * {@code NullType} (avoids relying on Spark's union type coercion).
     *
     * @param sparkParam carries the input datasets; indices 0 and 1 are used
     * @return a successful {@link SparkResult} wrapping the merged dataset
     * @throws IllegalArgumentException if fewer than two datasets are supplied
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {
        if (sparkParam.getDatasetList() == null || sparkParam.getDatasetList().size() < 2) {
            throw new IllegalArgumentException("UnionTableNode requires two input datasets");
        }
        Dataset<Row> dataset1 = sparkParam.getDatasetList().get(0);
        Dataset<Row> dataset2 = sparkParam.getDatasetList().get(1);

        // Step 1: snapshot both column lists before either dataset is mutated.
        // Set lookups replace the original O(n^2) re-scan of columns() that
        // happened on every loop iteration.
        String[] names1 = dataset1.columns();
        String[] names2 = dataset2.columns();
        Set<String> columnSet1 = new HashSet<>(Arrays.asList(names1));
        Set<String> columnSet2 = new HashSet<>(Arrays.asList(names2));

        // Step 2: add each missing column to the other dataset as a typed
        // null, so the union sees matching column types on both sides.
        for (String name : names1) {
            if (!columnSet2.contains(name)) {
                dataset2 = dataset2.withColumn(name,
                        functions.lit(null).cast(dataset1.schema().apply(name).dataType()));
            }
        }
        for (String name : names2) {
            if (!columnSet1.contains(name)) {
                dataset1 = dataset1.withColumn(name,
                        functions.lit(null).cast(dataset2.schema().apply(name).dataType()));
            }
        }

        // Step 3: union by name — column order may differ between the two
        // datasets, so a positional union() would be incorrect here.
        Dataset<Row> mergedDataset = dataset1.unionByName(dataset2);
        return SparkResult.success(mergedDataset);
    }

    /** Identifies this node as the UNION_TABLE operation in the node registry. */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.UNION_TABLE;
    }
}
