package com.cl.spark.node;

import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Component;

import java.util.*;

@Component
public class UnionTableNode extends BaseSparkNode {

    /**
     * Unions the first two upstream datasets by column name.
     *
     * <p>Before the union, each dataset is padded with any column it is missing
     * (relative to the other dataset) as a typed null column, so that
     * {@link Dataset#unionByName(Dataset)} sees identical schemas on both sides.
     *
     * @param sparkParam carrier of the upstream results; must contain at least two
     *                   results, each holding a non-null dataset
     * @return a successful {@link SparkResult} wrapping the merged dataset
     * @throws IllegalArgumentException if fewer than two upstream results are present
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {
        if (sparkParam.getSparkResultList() == null || sparkParam.getSparkResultList().size() < 2) {
            throw new IllegalArgumentException("UnionTableNode requires at least two upstream datasets");
        }
        Dataset<Row> dataset1 = sparkParam.getSparkResultList().get(0).getDataset();
        Dataset<Row> dataset2 = sparkParam.getSparkResultList().get(1).getDataset();

        // Step 1: capture the column names of both datasets.
        String[] columns1 = dataset1.columns();
        String[] columns2 = dataset2.columns();

        // Step 2: pad each dataset with the columns it is missing, filled with nulls.
        // Match names case-insensitively, consistent with Spark's default column
        // resolution (spark.sql.caseSensitive=false). Cast the null literal to the
        // source column's type: a bare lit(null) is NullType, which can break
        // unionByName's schema resolution.
        for (String col : columns1) {
            if (!containsIgnoreCase(columns2, col)) {
                dataset2 = dataset2.withColumn(col,
                        functions.lit(null).cast(dataset1.schema().apply(col).dataType()));
            }
        }
        for (String col : columns2) {
            if (!containsIgnoreCase(columns1, col)) {
                dataset1 = dataset1.withColumn(col,
                        functions.lit(null).cast(dataset2.schema().apply(col).dataType()));
            }
        }

        // Step 3: merge by name so differing column order between the inputs is harmless.
        Dataset<Row> mergedDataset = dataset1.unionByName(dataset2);
        return SparkResult.success(mergedDataset);
    }

    /**
     * Removes duplicate columns while preserving first-seen order.
     *
     * @param columns candidate columns, possibly containing duplicates
     * @return the distinct columns as an array, in original order
     */
    public static Column[] removeDuplicateColumns(List<Column> columns) {
        // LinkedHashSet de-duplicates (via Column.equals) and keeps insertion order.
        Set<Column> uniqueColumnsSet = new LinkedHashSet<>(columns);
        return uniqueColumnsSet.toArray(new Column[0]);
    }

    /**
     * Returns whether {@code array} contains {@code str}, ignoring case.
     *
     * @param array the strings to search (must not contain null elements)
     * @param str   the string to look for
     * @return true if a case-insensitive match is found
     */
    private static boolean containsIgnoreCase(String[] array, String str) {
        for (String s : array) {
            if (s.equalsIgnoreCase(str)) {
                return true;
            }
        }
        return false;
    }

    /** @return the node type handled by this component */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.UNION_TABLE;
    }
}
