package com.cl.spark.node;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@Component
public class JoinNode extends BaseSparkNode {

    /**
     * Joins the first two upstream datasets on the key columns configured in
     * the node expression.
     *
     * <p>Expected expression keys: {@code joinType} (a Chinese label mapped to
     * a Spark join-type string), {@code fields1} / {@code fields2} (parallel
     * arrays of key column names for the left / right dataset).
     *
     * <p>Columns of the right dataset whose names collide (case-insensitively)
     * with left-dataset columns are renamed with a prefix derived from the
     * right dataset's table name before joining; {@code fields2} entries are
     * rewritten to the renamed columns so the join condition stays valid.
     *
     * @param sparkParam carries the node expression and the upstream results
     *                   (index 0 = left dataset, index 1 = right dataset)
     * @return a successful {@link SparkResult} wrapping the joined dataset
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {
        JSONObject expression = sparkParam.getNodeExpression();
        String joinTypeStr = expression.getString("joinType");

        JSONArray fields1 = expression.getJSONArray("fields1");
        JSONArray fields2 = expression.getJSONArray("fields2");

        Dataset<Row> dataset1 = sparkParam.getSparkResultList().get(0).getDataset();
        Dataset<Row> dataset2 = sparkParam.getSparkResultList().get(1).getDataset();

        String prefix = buildPrefix(resolveTableName(sparkParam, dataset2));
        dataset2 = renameConflictingColumns(dataset1, dataset2, fields2, prefix);

        // Build the equi-join condition BEFORE any dataset swap: each Column is
        // bound to its originating dataset, so resolution is unaffected by the
        // right-anti swap below.
        List<Column> columnList = new ArrayList<>();
        for (int i = 0; i < fields1.size(); i++) {
            columnList.add(dataset1.col(fields1.getString(i))
                    .equalTo(dataset2.col(fields2.getString(i))));
        }

        String joinType = resolveJoinType(joinTypeStr);
        if ("右反连接".equals(joinTypeStr)) {
            // Spark has no "rightanti" join type: swap the sides and use
            // "leftanti", which yields the rows of the (original) right
            // dataset that have no match in the left dataset.
            Dataset<Row> temp = dataset1;
            dataset1 = dataset2;
            dataset2 = temp;
        }

        Dataset<Row> joined = dataset1.join(dataset2,
                // No key columns configured -> cross-join via a constant-true
                // condition rather than failing on an empty reduce.
                columnList.stream().reduce(Column::and).orElse(functions.lit(true)),
                joinType).toDF();

        return SparkResult.success(joined);
    }

    /**
     * Determines the right dataset's table name: prefer the upstream result's
     * declared name, otherwise fall back to a {@code TABLE_NAME} column on the
     * first row, if present. May return {@code null} when neither is available.
     */
    private static String resolveTableName(SparkParam sparkParam, Dataset<Row> dataset2) {
        String tableName = sparkParam.getSparkResultList().get(1).getTableName();
        if (tableName == null && !dataset2.isEmpty()) {
            Row firstRow = dataset2.first();
            if (Arrays.asList(firstRow.schema().fieldNames()).contains("TABLE_NAME")) {
                tableName = firstRow.getAs("TABLE_NAME");
            }
        }
        return tableName;
    }

    /**
     * Abbreviates a table name to a rename prefix by joining the first letter
     * of each non-empty {@code '_'}-separated segment (e.g. "user_order" -> "uo").
     * Falls back to abbreviating the literal "repeat" when no name is known.
     */
    private static String buildPrefix(String tableName) {
        String source = (tableName != null) ? tableName : "repeat";
        return Stream.of(source.split("_"))
                .filter(s -> !s.isEmpty())
                .map(s -> s.substring(0, 1))
                .collect(Collectors.joining());
    }

    /**
     * Renames every right-dataset column that collides (case-insensitively)
     * with a left-dataset column to {@code prefix + "__" + column}, adding a
     * numeric suffix when that name itself already exists on the right side.
     * Rewrites matching {@code fields2} join-key entries to the new names.
     *
     * @return the right dataset with conflicting columns renamed
     */
    private static Dataset<Row> renameConflictingColumns(Dataset<Row> dataset1,
                                                         Dataset<Row> dataset2,
                                                         JSONArray fields2,
                                                         String prefix) {
        int index = 0;
        // dataset2.columns() is snapshotted by the for-each, so renaming while
        // iterating is safe; the inner noneMatch re-reads the current columns.
        for (String column : dataset2.columns()) {
            if (Arrays.stream(dataset1.columns()).anyMatch(item -> item.equalsIgnoreCase(column))) {
                final String base = prefix + "__" + column;
                String newColumn = base;
                if (Arrays.stream(dataset2.columns()).anyMatch(item -> item.equalsIgnoreCase(base))) {
                    newColumn = base + "_" + index++;
                }
                dataset2 = dataset2.withColumnRenamed(column, newColumn);
                // BUG FIX: record the ACTUAL final name (including any numeric
                // suffix). The original always wrote the un-suffixed name into
                // fields2, which in the suffixed branch referenced a column
                // that no longer exists and would break join-key resolution.
                if (fields2.contains(column)) {
                    fields2.set(fields2.indexOf(column), newColumn);
                }
            }
        }
        return dataset2;
    }

    /**
     * Maps the configured Chinese join-type label to Spark's join-type string.
     * Unknown or missing labels default to an inner join. "右反连接" maps to
     * "leftanti" because the caller swaps the two datasets for that case.
     */
    private static String resolveJoinType(String joinTypeStr) {
        if ("左连接".equals(joinTypeStr)) {
            return "leftouter";
        }
        if ("右连接".equals(joinTypeStr)) {
            return "rightouter";
        }
        if ("半左连接".equals(joinTypeStr)) {
            return "leftsemi";
        }
        if ("左反连接".equals(joinTypeStr)) {
            return "leftanti";
        }
        if ("右反连接".equals(joinTypeStr)) {
            return "leftanti"; // sides are swapped by the caller
        }
        if ("全外连接".equals(joinTypeStr)) {
            return "fullouter";
        }
        return "inner";
    }

    /** Identifies this node as the JOIN node type for the dispatch registry. */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.JOIN;
    }
}
