package com.cl.spark.node;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Component;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@Component
public class JoinNode extends BaseSparkNode {

    /**
     * Joins the first two datasets of {@code sparkParam} on the key-column pairs
     * configured in the node expression.
     *
     * <p>Expression keys read:
     * <ul>
     *   <li>{@code joinType} — Chinese UI label; mapped to a Spark join type
     *       (defaults to inner join for unknown labels)</li>
     *   <li>{@code fields1} — join key columns of the first dataset</li>
     *   <li>{@code fields2} — join key columns of the second dataset, positionally
     *       paired with {@code fields1}</li>
     * </ul>
     *
     * @param sparkParam carries the node expression and the input dataset list
     *                   (datasets at index 0 and 1 are joined)
     * @return a successful {@link SparkResult} wrapping the joined dataset
     * @throws IllegalArgumentException if {@code fields2} has fewer entries than
     *                                  {@code fields1} (every left key needs a partner)
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {
        JSONObject expression = sparkParam.getNodeExpression();
        String joinType = resolveJoinType(expression.getString("joinType"));

        List<String> fields1 = expression.getJSONArray("fields1").toJavaList(String.class);
        List<String> fields2 = expression.getJSONArray("fields2").toJavaList(String.class);
        if (fields2.size() < fields1.size()) {
            // Previously this surfaced as a bare IndexOutOfBoundsException inside the loop.
            throw new IllegalArgumentException(
                    "fields2 has fewer join keys than fields1: "
                            + fields2.size() + " < " + fields1.size());
        }

        List<Dataset<Row>> datasetList = sparkParam.getDatasetList();
        Dataset<Row> dataset1 = datasetList.get(0);
        Dataset<Row> dataset2 = datasetList.get(1);

        // Built once and reused by both the primary join and the fallback below.
        Column condition = buildJoinCondition(dataset1, dataset2, fields1, fields2);

        Dataset<Row> joined;
        try {
            joined = dataset1.as("df1").join(dataset2.as("df2"), condition, joinType).toDF();
        } catch (Exception e) {
            // NOTE(review): this fallback swaps the two datasets, which inverts the
            // direction of left/right outer joins — confirm this retry is intended
            // behavior rather than masking a real configuration failure.
            e.printStackTrace();
            joined = dataset2.as("df1").join(dataset1.as("df2"), condition, joinType).toDF();
        }

        return SparkResult.success(joined);
    }

    /**
     * Maps the Chinese join-type label from the UI to Spark's join type string.
     * Unknown or missing labels fall back to an inner join.
     */
    private static String resolveJoinType(String joinTypeStr) {
        if ("左连接".equals(joinTypeStr)) {
            return "leftouter";
        }
        if ("右连接".equals(joinTypeStr)) {
            return "rightouter";
        }
        return "inner";
    }

    /**
     * Builds the equi-join condition {@code left.f1[i] == right.f2[i]} AND-ed over all
     * key pairs. With no key pairs the condition is a literal {@code true}, so the join
     * degenerates to a cross-style join (matches the original behavior).
     */
    private static Column buildJoinCondition(Dataset<Row> left, Dataset<Row> right,
                                             List<String> fields1, List<String> fields2) {
        Column condition = null;
        for (int i = 0; i < fields1.size(); i++) {
            Column pair = left.col(fields1.get(i)).equalTo(right.col(fields2.get(i)));
            condition = (condition == null) ? pair : condition.and(pair);
        }
        return (condition == null) ? functions.lit(true) : condition;
    }

    /** Identifies this node as the JOIN step in the pipeline registry. */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.JOIN;
    }
}
