package com.zz.util;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.Properties;

public class SparkMySQLToHDFSToKafka {

    /**
     * Entry point: reads per-config filter rows from MySQL, loads log data from
     * HDFS, applies each configured WHERE condition, and publishes the matching
     * rows to Kafka as JSON strings.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // 1. Initialize the SparkSession.
        SparkConf conf = new SparkConf()
                .setAppName("SparkMySQLToHDFSToKafka")
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .setMaster("local[*]"); // Remove setMaster (or use a cluster manager) in production.

        SparkSession spark = SparkSession.builder()
                .config(conf)
                .getOrCreate();

        try {
            // 2. Read the configuration rows from MySQL.
            Dataset<Row> mysqlConfigDF = readFromMySQL(spark);

            System.out.println("从MySQL读取的配置数据：");
            mysqlConfigDF.show();

            // 3. Process each configuration record; a failure on one record is
            //    logged and does not abort the remaining records.
            mysqlConfigDF.collectAsList().forEach(row -> {
                try {
                    // The "id" column identifies the config record, not a table name.
                    int configId = row.getAs("id");
                    String whereCondition = row.getAs("where_condition");

                    System.out.println("处理配置 - id: " + configId);

                    // 4. Read the raw log data from HDFS.
                    // NOTE(review): "hdfs://origin_data/..." has no namenode
                    // authority (usually hdfs://host:port/path) — confirm this
                    // resolves correctly against the cluster's fs.defaultFS.
                    Dataset<Row> hdfsDataDF = readFromHDFS(spark, "hdfs://origin_data/gmall/log/topic_log/2024-06-14");

                    // 5. Register a temp view so the filter can run as plain SQL.
                    hdfsDataDF.createOrReplaceTempView("temp_data");

                    // 6. Build the query. The configured condition is translated
                    //    from MySQL syntax and applied; previously this was
                    //    commented out, so every config silently exported the
                    //    full, unfiltered dataset.
                    String query = "SELECT * FROM temp_data";
                    if (whereCondition != null && !whereCondition.isEmpty()) {
                        query += " WHERE " + convertMySqlToSparkCondition(whereCondition);
                    }

                    Dataset<Row> filteredDF = spark.sql(query);

                    System.out.println("读取到数据量: " + filteredDF.count());
                    filteredDF.show(5);

                    // 7. Publish the filtered rows to Kafka.
                    writeToKafka(filteredDF);

                } catch (Exception e) {
                    System.err.println("处理配置记录出错: " + row);
                    e.printStackTrace();
                }
            });

        } catch (Exception e) {
            System.err.println("主流程出错:");
            e.printStackTrace();
        } finally {
            spark.stop();
        }
    }

    /**
     * Reads the filter-configuration table from MySQL over JDBC.
     *
     * @param spark active session used for the JDBC read
     * @return dataset containing the {@code id} and {@code where_condition} columns
     */
    private static Dataset<Row> readFromMySQL(SparkSession spark) {
        String jdbcUrl = "jdbc:mysql://hadoop101:3306/finder";
        String table = "finder_sql";

        Properties connectionProperties = new Properties();
        connectionProperties.put("user", "root");
        connectionProperties.put("password", "123456");
        // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x
        // class; Connector/J 8+ uses "com.mysql.cj.jdbc.Driver". Verify which
        // connector jar is on the cluster classpath before changing.
        connectionProperties.put("driver", "com.mysql.jdbc.Driver");

        return spark.read()
                .jdbc(jdbcUrl, table, connectionProperties)
                .select("id", "where_condition");
    }

    /**
     * Reads a dataset from HDFS, picking the reader by file extension.
     * Paths with no recognized extension default to the JSON reader (the
     * topic_log directories carry no extension).
     *
     * @param spark    active session
     * @param hdfsPath file or directory path on HDFS
     * @return parsed dataset
     */
    private static Dataset<Row> readFromHDFS(SparkSession spark, String hdfsPath) {
        if (hdfsPath.endsWith(".parquet")) {
            return spark.read().parquet(hdfsPath);
        }
        if (hdfsPath.endsWith(".csv")) {
            return spark.read()
                    .option("header", "true")
                    .option("inferSchema", "true")
                    .csv(hdfsPath);
        }
        // ".json" and everything else: JSON reader. (The original had a
        // separate ".json" branch identical to the fallback.)
        return spark.read().json(hdfsPath);
    }

    /**
     * Serializes each row to a JSON string and writes the result to Kafka.
     * {@code toJSON()} yields a single string column named "value", which is
     * exactly the column the Kafka sink requires.
     *
     * @param dataDF rows to publish
     */
    private static void writeToKafka(Dataset<Row> dataDF) {
        // NOTE(review): hadoop103 uses port 9093 while the others use 9092 —
        // confirm this is intentional and not a typo.
        String kafkaBootstrapServers = "hadoop101:9092,hadoop102:9092,hadoop103:9093";
        String topic = "finder";

        Dataset<String> jsonDF = dataDF.toJSON();

        // Count once, BEFORE the write; counting afterwards would re-execute
        // the whole upstream pipeline just to print a number.
        long recordCount = jsonDF.count();

        jsonDF.write()
                .format("kafka")
                .option("kafka.bootstrap.servers", kafkaBootstrapServers)
                .option("topic", topic)
                .save();

        System.out.println("成功写入Kafka，记录数: " + recordCount);
    }

    /**
     * Best-effort translation of a MySQL WHERE condition into Spark SQL syntax.
     * Only a few known incompatibilities are rewritten; extend the replacement
     * list as new MySQL constructs appear in the config table.
     *
     * <p>NOTE(review): stripping "INTERVAL " leaves the unit keyword behind
     * (e.g. "DATE_SUB(NOW(), INTERVAL 1 DAY)" becomes
     * "date_sub(current_timestamp(), 1 DAY)", which Spark still rejects) —
     * confirm the conditions actually stored in finder_sql parse after this
     * rewrite.
     *
     * @param mysqlCondition raw condition text from the config table
     * @return condition with known MySQL-isms replaced by Spark equivalents
     */
    private static String convertMySqlToSparkCondition(String mysqlCondition) {
        return mysqlCondition
                .replace("NOW()", "current_timestamp()")
                .replace("DATE_SUB(", "date_sub(")
                .replace("INTERVAL ", "");
    }

}