package org.example.utils;

import com.google.common.collect.ImmutableMap;
import org.apache.spark.api.java.function.ForeachPartitionFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructField;

import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Utility helpers for moving data between MySQL (and one Phoenix/HBase table)
 * and Spark: loading tables as datasets or temp views, bulk-writing datasets
 * back to MySQL, and batched upserts via {@code INSERT ... ON DUPLICATE KEY
 * UPDATE}.
 *
 * <p>All methods are static; connection settings are resolved once from
 * {@link ConfigProperties} at class-load time.
 */
public class MysqlUtils_3 {

    // Connection settings resolved once from the shared configuration.
    // NOTE(review): the URL key uses a hyphen ("mysql-url") while the other
    // keys use underscores — confirm this matches the properties file.
    private static final String URL = ConfigProperties.getInstance().getProperty("mysql-url");
    private static final String USER = ConfigProperties.getInstance().getProperty("mysql_user");
    private static final String PASSWORD = ConfigProperties.getInstance().getProperty("mysql_pwd");
    private static final String DRIVER = ConfigProperties.getInstance().getProperty("mysql_driver");

    /** Default number of rows per JDBC batch. */
    private static final int DEFAULT_BATCH_SIZE = 1000;

    /**
     * Builds a JDBC reader for the given MySQL table using the configured
     * connection settings. Shared by the public read methods so the option
     * chain is defined in exactly one place.
     *
     * @param spark     active SparkSession
     * @param tableName MySQL table name (or subquery) for the "dbtable" option
     * @return the loaded dataset
     */
    private static Dataset<Row> loadMysqlTable(SparkSession spark, String tableName) {
        return spark.read().format("jdbc")
                .option("url", URL)
                .option("driver", DRIVER)
                .option("user", USER)
                .option("password", PASSWORD)
                .option("dbtable", tableName)
                .option("dateFormat", "yyyy-MM-dd") // adjust date format as needed
                .option("timestampFormat", "yyyy-MM-dd HH:mm:ss") // adjust timestamp format as needed
                .load();
    }

    /**
     * Loads a MySQL table and registers it as a Spark temp view whose name is
     * the table name itself.
     *
     * @param spark          active SparkSession
     * @param mysqlTableName MySQL table to load
     * @return the temp view name (identical to {@code mysqlTableName})
     * @throws IOException retained for signature compatibility; not thrown here
     */
    public static String readMysqlToTempTable(SparkSession spark, String mysqlTableName) throws IOException {
        loadMysqlTable(spark, mysqlTableName).createOrReplaceTempView(mysqlTableName);
        return mysqlTableName;
    }

    /**
     * Reads a MySQL table into a dataset.
     *
     * @param spark     active SparkSession
     * @param tableName MySQL table to read
     * @return the table contents as a {@code Dataset<Row>}
     * @throws IOException retained for signature compatibility; not thrown here
     */
    public static Dataset<Row> readMysqlTableDs(SparkSession spark, String tableName) throws IOException {
        return loadMysqlTable(spark, tableName);
    }

    /**
     * Reads HBase table {@code LOCOMOTIVE_ADS.TRAIN_STATUS} through Phoenix,
     * keeps rows with {@code train_status = '0'} whose {@code loco_type}
     * matches the supplied filter fragment, and registers the result as a
     * temp view. Resulting columns:
     * {@code s_train_id, loco_type, loco_no, cksj, train_status}.
     *
     * <p>Example:
     * {@code loadTrainStatusFromPhoenix(spark, "in ('HXD2')", "aei_run")}
     *
     * @param spark               active SparkSession
     * @param cxFilterSql         SQL fragment appended after "loco_type",
     *                            e.g. {@code "in ('HXD2')"}. NOTE(review):
     *                            concatenated directly into the filter —
     *                            pass trusted values only
     * @param createTempTableName name of the temp view to create
     */
    public static void loadTrainStatusFromPhoenix(SparkSession spark, String cxFilterSql, String createTempTableName) {
        spark.read().format("jdbc")
                .options(ImmutableMap.of(
                        "driver", "org.apache.phoenix.jdbc.PhoenixDriver",
                        "url", "jdbc:phoenix:JWGL-YY-T141149,JWGL-YY-T141150,JWGL-YY-T141151,JWGL-YY-T141152,JWGL-YY-V139150,JWGL-YY-V139151,JWGL-YY-V139152,JWGL-YY-V139153:2181:/hbase-unsecure",
                        "dbtable", "LOCOMOTIVE_ADS.TRAIN_STATUS"))
                .load()
                .selectExpr("s_train_id", "jccx as loco_type", "ch as loco_no", "train_status_time as cksj", "train_status")
                .filter("train_status = '0' and loco_type " + cxFilterSql)
                .createOrReplaceTempView(createTempTableName);
    }

    /**
     * Writes a dataset to a MySQL table using the default batch size.
     *
     * @param dataSet   dataset to write
     * @param tableName target MySQL table
     * @param saveMode  Spark save mode (Append, Overwrite, Ignore, ErrorIfExists)
     */
    public static void writeDataToMysqlTable(Dataset<Row> dataSet, String tableName, SaveMode saveMode) {
        writeDataToMysqlTableInBatches(dataSet, tableName, saveMode, DEFAULT_BATCH_SIZE);
    }

    /**
     * Writes a dataset to a MySQL table in batches of {@code batchSize} rows.
     *
     * @param dataSet   dataset to write
     * @param tableName target MySQL table
     * @param saveMode  Spark save mode (Append, Overwrite, Ignore, ErrorIfExists)
     * @param batchSize rows per JDBC batch ("batchsize" connection property)
     */
    public static void writeDataToMysqlTableInBatches(Dataset<Row> dataSet, String tableName, SaveMode saveMode, int batchSize) {
        Properties connectionProperties = new Properties();
        // The URL is supplied as the jdbc() argument below, so it is not
        // duplicated in the properties (the old "url" key was ignored).
        connectionProperties.put("driver", DRIVER);
        connectionProperties.put("user", USER);
        connectionProperties.put("password", PASSWORD);
        connectionProperties.put("batchsize", String.valueOf(batchSize));

        dataSet.write()
                .mode(saveMode)
                .jdbc(URL, tableName, connectionProperties);
    }

    /**
     * Upserts every row of the dataset into the given MySQL table using
     * {@code INSERT ... ON DUPLICATE KEY UPDATE}, batching
     * {@value #DEFAULT_BATCH_SIZE} rows per round trip. Each Spark partition
     * opens its own connection and commits one transaction; any failure rolls
     * the partition back and fails the Spark task (previously errors were
     * swallowed with printStackTrace, silently losing data).
     *
     * <p>Column names are taken from the dataset schema; values are bound
     * through PreparedStatement placeholders. NOTE(review): {@code tableName}
     * is concatenated into the SQL — pass trusted values only.
     *
     * @param dataset   dataset whose schema matches the target table's columns
     * @param tableName target MySQL table
     * @throws IOException retained for signature compatibility; not thrown here
     */
    public static void upsertDatasetToMySQL(Dataset<Row> dataset, String tableName) throws IOException {
        StructField[] fields = dataset.schema().fields();
        String[] columnNames = Arrays.stream(fields).map(StructField::name).toArray(String[]::new);

        // Build the statement once on the driver; it is identical for every
        // partition and captured as a plain (serializable) String.
        String columns = String.join(", ", columnNames);
        String placeholders = String.join(", ", Collections.nCopies(columnNames.length, "?"));
        String updateClause = Arrays.stream(columnNames)
                .map(column -> column + " = VALUES(" + column + ")")
                .collect(Collectors.joining(", "));
        String sql = "INSERT INTO " + tableName + " (" + columns + ") VALUES (" + placeholders + ") " +
                "ON DUPLICATE KEY UPDATE " + updateClause;

        // Explicit cast disambiguates the Java overload from the Scala
        // Function1 overload on Scala 2.12+ builds.
        dataset.foreachPartition((ForeachPartitionFunction<Row>) partition -> {
            try (Connection connection = DriverManager.getConnection(URL, USER, PASSWORD)) {
                connection.setAutoCommit(false); // one transaction per partition

                try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) {
                    int count = 0;
                    while (partition.hasNext()) {
                        Row row = partition.next();
                        for (int i = 0; i < columnNames.length; i++) {
                            preparedStatement.setObject(i + 1, row.getAs(columnNames[i]));
                        }
                        preparedStatement.addBatch();
                        count++;

                        if (count % DEFAULT_BATCH_SIZE == 0) {
                            preparedStatement.executeBatch();
                        }
                    }
                    if (count % DEFAULT_BATCH_SIZE != 0) {
                        preparedStatement.executeBatch(); // flush the remainder
                    }
                    connection.commit();
                } catch (SQLException e) {
                    connection.rollback();
                    // Fail the Spark task instead of silently dropping the partition.
                    throw new RuntimeException("Upsert into " + tableName + " failed", e);
                }
            }
        });
    }
}
