package com.cl.spark.node;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import com.cl.spark.util.SparkUtil;
import org.apache.parquet.Strings;
import org.apache.spark.sql.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;

import java.util.*;
import java.util.stream.Collectors;

import static org.apache.spark.sql.functions.lit;

@Component
public class CreateTableNode extends BaseSparkNode {

    @Autowired
    SparkSession sparkSession;

    /**
     * Appends the upstream dataset into a JDBC table, skipping rows that already
     * exist for the configured unique columns, and returns the newly inserted
     * rows read back from the table (identified by {@code id > latestId}).
     *
     * <p>Expected node expression keys: {@code tableName}, {@code url},
     * {@code username}, {@code password}, optional {@code uniqueColumns}.
     *
     * <p>NOTE(review): assumes the target table has an auto-increment column
     * named {@code id} — confirm against the actual schema.
     *
     * @param sparkParam carries the node expression and the upstream dataset
     *                   (first element of the spark result list)
     * @return a {@link SparkResult} wrapping the rows inserted by this run
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {
        JSONObject expression = sparkParam.getNodeExpression();
        String tableName = expression.getString("tableName");
        String url = expression.getString("url");
        String username = expression.getString("username");
        String password = expression.getString("password");
        Dataset<Row> dataset = sparkParam.getSparkResultList().get(0).getDataset();

        // Plain Properties instead of the original double-brace anonymous
        // subclass, which created a needless inner class holding a hidden
        // reference to this Spring singleton.
        Properties dbProperties = new Properties();
        dbProperties.setProperty("user", username);
        dbProperties.setProperty("password", password);

        // Snapshot of the current maximum auto-increment id; rows appended
        // below will all have ids greater than this value.
        Dataset<Row> latestIdDataset = sparkSession.read()
                .jdbc(url, tableName, dbProperties)
                .select(functions.max("id"));

        long latestId = 0;
        if (!latestIdDataset.isEmpty()) {
            // BUG FIX: the original called getAs("max(id)") and DISCARDED the
            // result, so latestId always stayed 0 and the final read-back query
            // returned the whole table instead of only the newly inserted rows.
            // max(id) is NULL when the table is empty, so guard before unboxing.
            Object maxId = latestIdDataset.first().get(0);
            if (maxId != null) {
                latestId = ((Number) maxId).longValue();
            }
            dataset = dataset.withColumn("DEFAULT_UNIQUE_KEY", functions.col("id"));
        }

        // Fall back to the synthetic DEFAULT_UNIQUE_KEY column when the node
        // configuration does not specify unique columns.
        JSONArray uniqueColumns = expression.getJSONArray("uniqueColumns");
        if (uniqueColumns == null || uniqueColumns.isEmpty()) {
            uniqueColumns = new JSONArray();
            uniqueColumns.add("DEFAULT_UNIQUE_KEY");
        }

        List<String> uniqueColumnsList = new ArrayList<>();
        for (int i = 0; i < uniqueColumns.size(); i++) {
            uniqueColumnsList.add(uniqueColumns.getString(i));
        }

        // SECURITY NOTE(review): table and column names are concatenated into
        // SQL. They come from node configuration rather than end users, but
        // should still be validated/whitelisted upstream.
        Dataset<Row> existingRecords = sparkSession.read()
                .jdbc(url, "(" +
                        "SELECT " + Strings.join(uniqueColumnsList, ",") + " FROM " + tableName +
                        ") AS t", dbProperties);

        // Drop rows whose unique-column values already exist in the table,
        // normalize remaining columns to strings (excluding "id"), then append.
        Dataset<Row> stringDataset = SparkUtil.toStringDataset(
                SparkUtil.filterByUniqueColumns(dataset, existingRecords, uniqueColumnsList), "id");
        stringDataset.write()
                .mode(SaveMode.Append)
                .jdbc(url, tableName, dbProperties);

        // Build the read-back projection, ensuring the auto-increment "id"
        // column appears exactly once (normalizing an upper-case "ID").
        List<String> columns = new ArrayList<>(Arrays.asList(dataset.columns()));
        if (columns.contains("ID")) {
            columns.set(columns.indexOf("ID"), "id");
        } else if (!columns.contains("id")) {
            columns.add("id");
        }
        String columnsStr = Strings.join(columns, ",");

        // Read back only the rows inserted by this run (id > latestId).
        Dataset<Row> res = sparkSession.read()
                .format("jdbc")
                .option("url", url)
                .option("dbtable", "( select " + columnsStr + " from " + tableName
                        + " where id > " + latestId + " ) as temp_" + tableName)
                .option("user", username)
                .option("password", password)
                .load();
        return SparkResult.success(res);
    }

    /** Node type discriminator used by the node dispatcher. */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.CREATE_TABLE;
    }

}
