package com.swsc.ai.config;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Properties;

/**
 * Spark SQL helper for reading MySQL tables through the JDBC data source.
 *
 * <p>NOTE(review): connection credentials are hard-coded in source; consider moving
 * them to external configuration or a secrets store, and keep them out of VCS.
 *
 * @author QF
 * @date 2023/10/11 10:49
 */
public class SparkSqlConfBk {

    private static final String URL = "jdbc:mysql://172.18.10.18:33017/tp_sync";
    private static final String DRIVER = "com.mysql.cj.jdbc.Driver";
    private static final String USER = "tp_dba";
    private static final String PASSWORD = "tp@swsc600369";

//    Local-development alternative:
//    private static final String URL = "jdbc:mysql://127.0.0.1:3306/tp_sync";
//    private static final String DRIVER = "com.mysql.jdbc.Driver";
//    private static final String USER = "root";
//    private static final String PASSWORD = "123456";

    /** Number of JDBC partitions used by {@link #getDataByPartition}. */
    private static final int PARTITION_NUM = 3;

    /** Utility class — not instantiable. */
    private SparkSqlConfBk() {
    }

    /**
     * Creates (or reuses) a SparkSession running on YARN.
     *
     * @param appName application name shown in the resource manager UI
     * @return the shared SparkSession
     */
    public static SparkSession getSession(String appName) {
        return SparkSession.builder()
                .appName(appName)
                .master("yarn")
                .getOrCreate();
    }

    /**
     * Closes the given session.
     *
     * @param sparkSession session to close
     */
    public static void closeSession(SparkSession sparkSession) {
        sparkSession.close();
    }

    /**
     * Loads an entire table over JDBC as a single (un-partitioned) read.
     *
     * @param session active session
     * @param table   table name (or a "(subquery) alias" expression)
     * @return the table contents
     */
    public static Dataset<Row> getData(SparkSession session, String table) {
        return session.read().format("jdbc")
                .option("url", URL)
                .option("driver", DRIVER)
                .option("dbtable", table)
                .option("user", USER)
                .option("password", PASSWORD)
                .load();
    }

    /**
     * Loads selected columns of a table with a WHERE filter applied.
     *
     * @param session active session
     * @param table   table name
     * @param where   SQL boolean expression used to filter rows
     * @param col     first column to select
     * @param cols    additional columns to select
     * @return the filtered projection
     */
    public static Dataset<Row> getDataBySql(SparkSession session, String table, String where, String col, String... cols) {
        // .jdbc(...) is its own source; the redundant .format("jdbc") call was dropped.
        return session.read()
                .jdbc(URL, table, jdbcProps())
                .select(col, cols)
                .where(where);
    }

    /**
     * Loads selected columns of a table split into {@link #PARTITION_NUM} parallel
     * JDBC reads, partitioned by contiguous ranges of a numeric column.
     *
     * <p>Range predicates are used instead of LIMIT/OFFSET because MySQL's LIMIT
     * performs poorly at large offsets.
     *
     * @param session      active session
     * @param table        table name
     * @param partitionCol numeric column used to split the read into ranges
     * @param where        SQL boolean expression used to filter rows
     * @param col          first column to select
     * @param cols         additional columns to select
     * @return the filtered projection, read in parallel partitions
     */
    public static Dataset<Row> getDataByPartition(SparkSession session, String table, String partitionCol, String where, String col, String... cols) {
        Properties props = jdbcProps();

        Integer max = aggregateInt(session, table, where, partitionCol, "max", props);
        Integer min = aggregateInt(session, table, where, partitionCol, "min", props);
        if (max == null || min == null) {
            // WHERE matched no rows — nothing to partition; fall back to a plain read.
            return getDataBySql(session, table, where, col, cols);
        }

        int length = max - min + 1;
        // Rows per partition, rounded up so partitions cover the whole [min, max] range.
        // (The previous code divided `max` instead of `length`, which produced wrong
        // partition sizes whenever min != 0.)
        int size = length % PARTITION_NUM == 0 ? length / PARTITION_NUM : length / PARTITION_NUM + 1;

        String[] predicates = new String[PARTITION_NUM];
        for (int i = 0; i < PARTITION_NUM; i++) {
            int lower = min + i * size;
            // Close the last range with "<= max" so the maximum value is always included.
            predicates[i] = i == PARTITION_NUM - 1
                    ? String.format("%s >= %d AND %s <= %d", partitionCol, lower, partitionCol, max)
                    : String.format("%s >= %d AND %s < %d", partitionCol, lower, partitionCol, lower + size);
        }

        return session.read()
                .jdbc(URL, table, predicates, props)
                .select(col, cols)
                .where(where);
    }

    /** Builds the shared JDBC connection properties. */
    private static Properties jdbcProps() {
        Properties props = new Properties();
        props.setProperty("user", USER);
        props.setProperty("password", PASSWORD);
        props.setProperty("driver", DRIVER);
        return props;
    }

    /**
     * Runs a single-column aggregate (e.g. "min"/"max") over the filtered table.
     *
     * <p>Aggregates before any projection, so {@code column} does not have to appear
     * in the caller's selected columns.
     *
     * @return the aggregated value, or {@code null} when no rows match the filter
     */
    private static Integer aggregateInt(SparkSession session, String table, String where,
                                        String column, String function, Properties props) {
        HashMap<String, String> agg = new HashMap<>();
        agg.put(column, function);
        Row row = session.read()
                .jdbc(URL, table, props)
                .where(where)
                .agg(agg)
                .first();
        // The JDBC source may map the column to INT or BIGINT; go through Number
        // instead of getInt(0) to avoid a ClassCastException, and handle the
        // all-null row produced when the filter matches nothing.
        return row.isNullAt(0) ? null : ((Number) row.get(0)).intValue();
    }
}
