package com.swsc.ai.config;

import com.swsc.ai.constant.EnvConstant;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.util.*;

/**
 * @author QF
 * @date 2023/10/11 10:49
 * @describe Spark SQL configuration helper: builds SparkSessions and JDBC-backed datasets.
 */
public class SparkSqlConf {

    /**
     * Environment configuration populated externally before any method is called.
     * Expected keys: "SPARK" (master URL), "URL", "DRIVER", "USER", "PASSWORD".
     * NOTE(review): public mutable static state — kept public for caller compatibility.
     */
    public static Map<String, String> envMap = new HashMap<>();

    /** Utility class; not instantiable. */
    private SparkSqlConf() {
    }

    /**
     * Gets (or creates) a SparkSession for the given application name,
     * using the master URL from {@code envMap.get("SPARK")}.
     *
     * @param appName Spark application name
     * @return the shared SparkSession
     */
    public static SparkSession getSession(String appName) {
        return SparkSession.builder()
                .appName(appName)
                .master(envMap.get("SPARK"))
                .getOrCreate();
    }

    /**
     * Closes the given session.
     *
     * @param sparkSession the session to close; must not be null
     */
    public static void closeSession(SparkSession sparkSession) {
        sparkSession.close();
    }

    /**
     * Loads a whole table over JDBC using the connection settings from {@link #envMap}.
     *
     * @param session active SparkSession
     * @param table   table name (or "(subquery) alias") passed as the JDBC "dbtable" option
     * @return the lazily-loaded dataset
     */
    public static Dataset<Row> getData(SparkSession session, String table) {
        return session.read()
                .format("jdbc")
                .option("url", envMap.get("URL"))
                .option("driver", envMap.get("DRIVER"))
                .option("dbtable", table)
                .option("user", envMap.get("USER"))
                .option("password", envMap.get("PASSWORD"))
                .load();
    }

    /**
     * Loads selected columns of a table over JDBC and applies a WHERE filter.
     * On failure, returns an empty placeholder dataset whose schema has the
     * requested column names (all string-typed) so downstream code can proceed.
     *
     * @param session active SparkSession
     * @param table   table name
     * @param where   SQL condition passed to {@code Dataset.where(String)}
     * @param col     first column to select
     * @param cols    remaining columns to select
     * @return the filtered projection, or an empty dataset with the same columns on error
     */
    public static Dataset<Row> getDataBySql(SparkSession session, String table, String where, String col, String... cols) {
        Properties props = new Properties();
        props.setProperty("user", envMap.get("USER"));
        props.setProperty("password", envMap.get("PASSWORD"));
        props.setProperty("driver", envMap.get("DRIVER"));
        try {
            // The previous format("jdbc") call was redundant before the jdbc(...) shorthand.
            return session.read().jdbc(envMap.get("URL"), table, props).select(col, cols).where(where);
        } catch (Exception e) {
            // NOTE(review): only the message is reported; the stack trace is dropped.
            System.out.println("查询数据异常：" + e.getMessage());
            // Trailing comma when cols is empty is harmless: String.split drops trailing empties.
            String colStr = col + "," + String.join(",", cols);
            return generateEmptyDataset(session, colStr);
        }
    }

    /**
     * Loads the result of {@code select <selectColumns> from <table> <condition>}
     * over JDBC, requesting {@code EnvConstant.PARTITION_NUM} partitions.
     * On failure, returns (and shows) an empty placeholder dataset with the
     * requested column names.
     *
     * @param session       active SparkSession
     * @param table         table name
     * @param partColumn    partition column (currently unused; kept for API compatibility)
     * @param selectColumns comma-separated column list for the SELECT clause
     * @param condition     trailing SQL fragment (e.g. a WHERE clause); appended verbatim
     * @return the loaded dataset, or an empty dataset with the same columns on error
     */
    public static Dataset<Row> getDataByPartition(SparkSession session, String table, String partColumn, String selectColumns, String condition) {
        String dataSelect = String.format("select %s from %s ", selectColumns, table) + condition;
        Map<String, String> options = new HashMap<>();
        options.put("query", dataSelect);
        options.put("numPartitions", String.valueOf(EnvConstant.PARTITION_NUM));
        options.putAll(jdbcOptions());
        try {
            return session.read().format("jdbc").options(options).load();
        } catch (Exception e) {
            // NOTE(review): only the message is reported; the stack trace is dropped.
            System.out.println("分区查询数据异常：" + e.getMessage());
            Dataset<Row> emptyDataset = generateEmptyDataset(session, selectColumns);
            emptyDataset.show();
            return emptyDataset;
        }
    }

    /**
     * Builds an empty {@code Dataset<Row>} whose schema is derived from a
     * comma-separated column list; every column is non-nullable StringType.
     * When the list is null/empty, a single "emptyDataset" column is used.
     *
     * @param session       active SparkSession
     * @param selectColumns comma-separated column names; may be null or empty
     * @return an empty dataset with the derived schema
     */
    public static Dataset<Row> generateEmptyDataset(SparkSession session, String selectColumns) {
        StructType schema = new StructType();
        if (selectColumns == null || selectColumns.isEmpty()) {
            schema = schema.add(new StructField("emptyDataset", DataTypes.StringType, false, Metadata.empty()));
        } else {
            for (String column : selectColumns.split(",")) {
                schema = schema.add(new StructField(column.trim(), DataTypes.StringType, false, Metadata.empty()));
            }
        }
        return session.createDataset(Collections.emptyList(), Encoders.row(schema));
    }

    /**
     * Executes a complete SQL statement over JDBC (via the "query" option),
     * requesting {@code EnvConstant.PARTITION_NUM} partitions.
     *
     * @param session active SparkSession
     * @param sql     complete SELECT statement
     * @return the lazily-loaded dataset
     */
    public static Dataset<Row> getDataByCompleteSQL(SparkSession session, String sql) {
        Map<String, String> options = new HashMap<>();
        options.put("query", sql);
        options.put("numPartitions", String.valueOf(EnvConstant.PARTITION_NUM));
        options.putAll(jdbcOptions());
        return session.read().format("jdbc").options(options).load();
    }

    /**
     * Loads a CSV file with an explicit DDL schema, skipping the first row
     * (assumed header) via {@code Dataset.offset(1)}.
     *
     * @param session    active SparkSession
     * @param structType DDL-formatted schema string, e.g. "a STRING, b INT"
     * @param path       CSV file path
     * @return the loaded dataset without its first row
     */
    public static Dataset<Row> getDataByCVS(SparkSession session, String structType, String path) {
        // NOTE(review): "CVS" in the method name is a typo for "CSV"; kept for caller compatibility.
        return session.read().schema(StructType.fromDDL(structType)).csv(path).offset(1);
    }

    /**
     * Builds the shared JDBC connection options (driver/url/user/password) from {@link #envMap}.
     * Extracted to remove duplication between the partitioned and complete-SQL loaders.
     */
    private static Map<String, String> jdbcOptions() {
        Map<String, String> dbOptions = new HashMap<>();
        dbOptions.put("driver", envMap.get("DRIVER"));
        dbOptions.put("url", envMap.get("URL"));
        dbOptions.put("user", envMap.get("USER"));
        dbOptions.put("password", envMap.get("PASSWORD"));
        return dbOptions;
    }
}
