package top.reminis.flink;

import lombok.extern.slf4j.Slf4j;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.StatementSet;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import top.reminis.conf.JobConfig;
import top.reminis.conf.JobConfigLoader;
import top.reminis.monitor.UniversalJobMonitor;
import top.reminis.utils.MinIOTestUtils;
import top.reminis.utils.ConfigurableSqlGenerator;
import top.reminis.utils.ConfigValidator;

import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * 通用Flink任务框架
 * 支持通过配置文件驱动处理不同数据源和目标
 * 
 * @author zwf
 * @create 2025-09-24
 */
@Slf4j
public class UniversalFlinkJob {

    private static JobConfig jobConfig;
    private static volatile boolean isRunning = true;
    private static ScheduledExecutorService scheduler;

    /**
     * Entry point: loads the job configuration, builds the Flink environment,
     * registers sources/sinks and submits the streaming job.
     *
     * @param args args[0] optionally names the configuration file; there is no
     *             code-defined fallback, so without it the job exits early
     */
    public static void main(String[] args) {
        // Only a command-line argument can supply the configuration file.
        String configFile = null;
        if (args.length > 0) {
            configFile = args[0];
            log.info("使用命令行参数指定的配置文件: {}", configFile);
        } else {
            // Fixed: previous message claimed a code-defined default existed, but none does.
            log.warn("未通过命令行参数指定配置文件，且无默认配置文件");
        }

        log.info("开始启动通用Flink任务，配置文件: {}", configFile);

        if (configFile == null) {
            log.warn("Flink配置文件为空，退出任务");
            cleanupAndExit(2);
            // Fixed: cleanupAndExit() does NOT terminate the JVM; without this
            // return the job would fall through and run every step with a null
            // configuration file.
            return;
        }

        try {
            // Step 1: load the configuration file
            loadJobConfig(configFile);

            // Step 2: validate configuration completeness
            validateJobConfiguration();

            // Step 3: print and cross-check every configuration section
            printAndValidateConfigurations();

            // Step 4: initialize Hadoop/S3A settings
            initializeHadoopConfiguration();

            // Step 5: create the Flink table environment
            StreamTableEnvironment tEnv = createFlinkEnvironment();

            // Step 6: register source and sink tables
            registerDataSourcesAndSinks(tEnv);

            // Step 7: register the graceful-shutdown hook
            registerShutdownHook();

            // Step 8: initialize the monitoring subsystem
            UniversalJobMonitor.initialize(
                jobConfig.getJobName(),
                jobConfig.getDescription(),
                jobConfig.getMonitoringInterval()
            );
            UniversalJobMonitor.recordJobStart();

            // Step 9: start the periodic status reporter
            startStatisticsMonitor();

            // Step 10: submit the data-processing statements
            executeDataProcessingJob(tEnv);

            log.info("=== {} 作业启动成功，开始处理数据... ===", jobConfig.getJobName());

            // Block the main thread so the job keeps running until shutdown.
            waitForTermination();

        } catch (Exception e) {
            // Fixed: jobConfig may still be null when loading itself failed —
            // avoid a secondary NPE that would mask the original error.
            String jobName = jobConfig != null ? jobConfig.getJobName() : configFile;
            log.error("作业 {} 启动失败，发生致命错误: {}", jobName, e.getMessage(), e);
            UniversalJobMonitor.recordError();
            cleanupAndExit(1);
        }
    }

    /**
     * Loads the job configuration from the given path into the static
     * {@code jobConfig} field.
     *
     * @param configFile path of the configuration file to load
     */
    private static void loadJobConfig(String configFile) {
        log.info("=== 1. 加载作业配置 ===");
        JobConfig loaded = JobConfigLoader.load(configFile);
        jobConfig = loaded;
        log.info("作业配置加载完成: {}", loaded.getJobName());
        log.info("=========================");
    }

    /**
     * Runs the external configuration validator over the loaded job config.
     * Logs and rethrows any validation failure unchanged.
     */
    private static void validateJobConfiguration() {
        log.info("=== 2. 验证配置完整性 ===");
        try {
            ConfigValidator.validateJobConfig(jobConfig);
        } catch (Exception e) {
            // Log the reason before propagating so startup logs show the cause.
            log.error("配置验证失败: {}", e.getMessage());
            throw e;
        }
        log.info("配置验证通过");
        log.info("=========================");
    }

    /**
     * Logs every key configuration section and validates each as it goes:
     * job basics, data sources, data sinks and storage. For MinIO storage a
     * live connection test is performed and a failure aborts startup.
     *
     * @throws IllegalStateException if any section is incomplete
     * @throws RuntimeException      if the MinIO connection test fails
     */
    private static void printAndValidateConfigurations() {
        // NOTE(review): the banner below says "2" although main() runs this as
        // its third step; the banner text is runtime output, so it is left
        // untouched here — confirm before renumbering.
        log.info("=== 2. 配置信息审查 ===");
        
        // Validate job basics (name, parallelism, checkpoint interval)
        validateJobConfig();
        
        // Print job basics
        log.info("[作业] 名称: {}", jobConfig.getJobName());
        log.info("[作业] 描述: {}", jobConfig.getDescription());
        log.info("[作业] 并行度: {}", jobConfig.getParallelism());
        log.info("[作业] 检查点间隔: {}ms", jobConfig.getCheckpointInterval());
        
        // Validate, then print, the data source configurations
        validateDataSources();
        for (JobConfig.DataSourceConfig source : jobConfig.getDataSources()) {
            log.info("[数据源] 名称: {}, 类型: {}, 连接: {}", 
                source.getName(), source.getType(), source.getConnectionString());
        }
        
        // Validate, then print, the data sink configurations
        validateDataSinks();
        for (JobConfig.DataSinkConfig sink : jobConfig.getDataSinks()) {
            log.info("[数据目标] 名称: {}, 类型: {}, 连接: {}", 
                sink.getName(), sink.getType(), sink.getConnectionString());
        }
        
        // Validate and print the storage configuration
        validateStorageConfig();
        JobConfig.StorageConfig storage = jobConfig.getStorage();
        if (storage != null) {
            log.info("[存储] 类型: {}, 端点: {}, 桶: {}", 
                storage.getType(), storage.getEndpoint(), storage.getBucket());
            
            // For MinIO, fail fast when the endpoint/credentials do not work
            if ("minio".equalsIgnoreCase(storage.getType())) {
                if (!MinIOTestUtils.testConnection(storage.getEndpoint(), 
                    storage.getAccessKey(), storage.getSecretKey(), storage.getBucket())) {
                    log.error("MinIO连接测试失败！请检查配置。");
                    throw new RuntimeException("无法连接到MinIO服务，作业终止。");
                }
                log.info("MinIO连接测试成功。");
            }
        }
        
        log.info("=========================");
    }

    /**
     * Propagates MinIO/S3A settings to later components via JVM system
     * properties. No-op unless the configured storage type is "minio".
     */
    private static void initializeHadoopConfiguration() {
        log.info("=== 3. 初始化Hadoop配置 ===");
        
        JobConfig.StorageConfig storage = jobConfig.getStorage();
        if (storage != null && "minio".equalsIgnoreCase(storage.getType())) {
            // Hand the S3A settings to downstream code through system properties.
            System.setProperty("fs.s3a.endpoint", storage.getEndpoint());
            System.setProperty("fs.s3a.access.key", storage.getAccessKey());
            System.setProperty("fs.s3a.secret.key", storage.getSecretKey());
            System.setProperty("fs.s3a.path.style.access", "true");
            System.setProperty("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
            System.setProperty("fs.s3a.connection.ssl.enabled", "false");
            System.setProperty("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
            // Fixed: a local org.apache.hadoop.conf.Configuration used to be
            // created and populated here but was never passed anywhere — dead
            // code removed.
            
            // Point HADOOP_CONF_DIR at a writable scratch directory.
            System.setProperty("HADOOP_CONF_DIR", System.getProperty("java.io.tmpdir"));
            
            log.info("Hadoop S3A配置初始化完成。");
            log.info("S3A Endpoint: {}", storage.getEndpoint());
            // Fixed: credentials must never appear in clear text in the logs.
            log.info("S3A Access Key: {}", storage.getAccessKey().length() > 0 ? "***" : "空");
            log.info("S3A Secret Key: {}", storage.getSecretKey().length() > 0 ? "***" : "空");
        }
        
        log.info("============================");
    }

    /**
     * Validates the basic job settings: a non-blank name, a positive
     * parallelism and a positive checkpoint interval.
     *
     * @throws IllegalStateException on the first invalid setting
     */
    private static void validateJobConfig() {
        String jobName = jobConfig.getJobName();
        if (jobName == null || jobName.trim().isEmpty()) {
            throw new IllegalStateException("作业名称不能为空，请在配置文件中设置 job.name");
        }
        if (jobConfig.getParallelism() <= 0) {
            throw new IllegalStateException("并行度必须大于0，当前值: " + jobConfig.getParallelism());
        }
        if (jobConfig.getCheckpointInterval() <= 0) {
            throw new IllegalStateException("检查点间隔必须大于0，当前值: " + jobConfig.getCheckpointInterval());
        }
    }

    /**
     * Validates the configured data sources: at least one must exist, each
     * needs a name, a type and field definitions, and Kafka sources
     * additionally require topic, bootstrap servers and a consumer group.
     *
     * @throws IllegalStateException on the first missing or blank setting
     */
    private static void validateDataSources() {
        List<JobConfig.DataSourceConfig> sources = jobConfig.getDataSources();
        if (sources == null || sources.isEmpty()) {
            throw new IllegalStateException("至少需要一个数据源配置，请在配置文件中添加 data_sources");
        }

        for (JobConfig.DataSourceConfig src : sources) {
            String name = src.getName();
            if (name == null || name.trim().isEmpty()) {
                throw new IllegalStateException("数据源名称不能为空");
            }
            String type = src.getType();
            if (type == null || type.trim().isEmpty()) {
                throw new IllegalStateException("数据源类型不能为空，数据源: " + name);
            }
            if (src.getFields() == null || src.getFields().isEmpty()) {
                throw new IllegalStateException("数据源 " + name + " 缺少字段定义，请在配置文件中添加 fields");
            }

            // Kafka sources carry extra mandatory connection settings.
            if ("kafka".equalsIgnoreCase(type)) {
                String topic = src.getTopic();
                if (topic == null || topic.trim().isEmpty()) {
                    throw new IllegalStateException("Kafka 数据源 " + name + " 缺少 topic 配置");
                }
                String servers = src.getBootstrapServers();
                if (servers == null || servers.trim().isEmpty()) {
                    throw new IllegalStateException("Kafka 数据源 " + name + " 缺少 bootstrap_servers 配置");
                }
                String groupId = src.getGroupId();
                if (groupId == null || groupId.trim().isEmpty()) {
                    throw new IllegalStateException("Kafka 数据源 " + name + " 缺少 group_id 配置");
                }
            }
        }
    }

    /**
     * Validates the configured data sinks: at least one must exist, each needs
     * a name, a type and field definitions; Iceberg sinks need a table name
     * and Kafka sinks (dead-letter queues) need topic and bootstrap servers.
     *
     * @throws IllegalStateException on the first missing or blank setting
     */
    private static void validateDataSinks() {
        List<JobConfig.DataSinkConfig> sinks = jobConfig.getDataSinks();
        if (sinks == null || sinks.isEmpty()) {
            throw new IllegalStateException("至少需要一个数据目标配置，请在配置文件中添加 data_sinks");
        }

        for (JobConfig.DataSinkConfig dst : sinks) {
            String name = dst.getName();
            if (name == null || name.trim().isEmpty()) {
                throw new IllegalStateException("数据目标名称不能为空");
            }
            String type = dst.getType();
            if (type == null || type.trim().isEmpty()) {
                throw new IllegalStateException("数据目标类型不能为空，数据目标: " + name);
            }
            if (dst.getFields() == null || dst.getFields().isEmpty()) {
                throw new IllegalStateException("数据目标 " + name + " 缺少字段定义，请在配置文件中添加 fields");
            }

            // Iceberg sinks must name their target table.
            if ("iceberg".equalsIgnoreCase(type)) {
                String tableName = dst.getTableName();
                if (tableName == null || tableName.trim().isEmpty()) {
                    throw new IllegalStateException("Iceberg 数据目标 " + name + " 缺少 table_name 配置");
                }
            }

            // Kafka sinks (dead-letter queues) need connection settings.
            if ("kafka".equalsIgnoreCase(type)) {
                String topic = dst.getTopic();
                if (topic == null || topic.trim().isEmpty()) {
                    throw new IllegalStateException("Kafka 数据目标 " + name + " 缺少 topic 配置");
                }
                String servers = dst.getBootstrapServers();
                if (servers == null || servers.trim().isEmpty()) {
                    throw new IllegalStateException("Kafka 数据目标 " + name + " 缺少 bootstrap_servers 配置");
                }
            }
        }
    }

    /**
     * Validates the storage section: it must exist and all of type, endpoint,
     * access key, secret key and bucket must be non-blank.
     *
     * @throws IllegalStateException on the first missing or blank setting
     */
    private static void validateStorageConfig() {
        JobConfig.StorageConfig storage = jobConfig.getStorage();
        if (storage == null) {
            throw new IllegalStateException("缺少存储配置，请在配置文件中添加 storage");
        }
        requireStorageText(storage.getType(), "存储类型不能为空");
        requireStorageText(storage.getEndpoint(), "存储端点不能为空");
        requireStorageText(storage.getAccessKey(), "存储访问密钥不能为空");
        requireStorageText(storage.getSecretKey(), "存储秘密密钥不能为空");
        requireStorageText(storage.getBucket(), "存储桶名称不能为空");
    }

    /**
     * Throws an IllegalStateException with the given message when the value is
     * null or blank.
     */
    private static void requireStorageText(String value, String message) {
        if (value == null || value.trim().isEmpty()) {
            throw new IllegalStateException(message);
        }
    }

    /**
     * Builds a {@code CREATE CATALOG IF NOT EXISTS} statement.
     *
     * @param catalogName catalog identifier, concatenated verbatim (must not
     *                    contain SQL metacharacters)
     * @param catalogType value of the mandatory 'type' property
     * @param properties  remaining WITH properties; iteration order of the map
     *                    determines their order in the statement
     * @return the DDL statement text
     */
    private static String generateCreateCatalogSql(String catalogName, String catalogType, Map<String, String> properties) {
        List<String> props = new ArrayList<>();
        props.add("  'type'='" + escapeSqlLiteral(catalogType) + "'");
        properties.forEach((key, value) ->
            props.add("  '" + escapeSqlLiteral(key) + "'='" + escapeSqlLiteral(value) + "'"));

        // Fixed: keys/values containing a single quote previously produced
        // broken (and injectable) SQL; quotes are now doubled per SQL rules.
        return "CREATE CATALOG IF NOT EXISTS " + catalogName + " WITH (\n"
            + String.join(",\n", props)
            + "\n)";
    }

    /**
     * Escapes single quotes in a SQL string literal by doubling them.
     * Null is returned unchanged (it then renders as the text "null",
     * matching the previous concatenation behavior).
     */
    private static String escapeSqlLiteral(String s) {
        return s == null ? null : s.replace("'", "''");
    }

    /**
     * Creates and configures the Flink StreamTableEnvironment: parallelism,
     * exactly-once checkpointing, a fixed-delay restart strategy and any extra
     * table properties from the job configuration.
     *
     * @return the configured StreamTableEnvironment
     */
    private static StreamTableEnvironment createFlinkEnvironment() {
        log.info("=== 4. 创建Flink执行环境 ===");
        
        // Hadoop/S3A settings must be in place before the environment is built.
        setupHadoopConfigurationForFlink();
        
        EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().build();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        
        // Parallelism from the job configuration
        env.setParallelism(jobConfig.getParallelism());
        
        // Checkpointing: exactly-once with configured interval/timeout limits
        env.enableCheckpointing(jobConfig.getCheckpointInterval(), CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(jobConfig.getCheckpointTimeout());
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(jobConfig.getMinPauseBetweenCheckpoints());
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(jobConfig.getMaxConcurrentCheckpoints());
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(jobConfig.getTolerableCheckpointFailureNumber());
        
        // Restart strategy: fixed number of attempts with a fixed delay
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
            jobConfig.getMaxRestartAttempts(),
            Time.seconds(jobConfig.getRestartDelaySeconds())
        ));

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);
        
        // Apply any extra table properties verbatim to the table config
        Configuration tableConfig = tEnv.getConfig().getConfiguration();
        if (jobConfig.getTableProperties() != null) {
            jobConfig.getTableProperties().forEach(tableConfig::setString);
        }

        log.info("Flink环境创建完成。");
        log.info("============================");
        return tEnv;
    }
    
    /**
     * Applies the MinIO/S3A configuration (via JVM system properties) before
     * the Flink environment is created, and best-effort-checks that the
     * target bucket and warehouse directory exist.
     */
    private static void setupHadoopConfigurationForFlink() {
        JobConfig.StorageConfig storage = jobConfig.getStorage();
        if (storage != null && "minio".equalsIgnoreCase(storage.getType())) {
            log.info("设置MinIO存储配置...");
            
            // System properties keep the settings visible in cluster deployments too.
            System.setProperty("fs.s3a.endpoint", storage.getEndpoint());
            System.setProperty("fs.s3a.access.key", storage.getAccessKey());
            System.setProperty("fs.s3a.secret.key", storage.getSecretKey());
            System.setProperty("fs.s3a.path.style.access", "true");
            System.setProperty("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
            System.setProperty("fs.s3a.connection.ssl.enabled", "false");
            System.setProperty("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
            
            // Additional S3A tuning: retries and connection timeouts
            System.setProperty("fs.s3a.attempts.maximum", "3");
            System.setProperty("fs.s3a.retry.interval", "1000");
            System.setProperty("fs.s3a.connection.timeout", "10000");
            System.setProperty("fs.s3a.connection.establish.timeout", "5000");
            
            log.info("MinIO配置已通过系统属性设置");
            log.info("S3A Endpoint: {}", storage.getEndpoint());
            // Fixed: the access key is a credential and must not be logged in
            // clear text (the secret key was already masked).
            log.info("S3A Access Key: ***");
            log.info("S3A Secret Key: ***");
            
            // Best-effort bucket/directory check; in a cluster environment this
            // may fail without blocking the job.
            try {
                ensureMinIOBucketAndDirectories(storage);
            } catch (Exception e) {
                log.warn("在集群环境中无法检查MinIO桶和目录，但作业仍可继续运行: {}", e.getMessage());
            }
        }
    }
    
    /**
     * Ensures the MinIO bucket and the "iceberg/" warehouse directory exist.
     * Skipped in cluster environments; locally a hard failure aborts the job.
     *
     * @param storage validated storage section of the job configuration
     */
    private static void ensureMinIOBucketAndDirectories(JobConfig.StorageConfig storage) {
        try {
            log.info("确保MinIO桶和目录存在...");
            
            // In a cluster the bucket is expected to be pre-provisioned.
            if (isRunningInCluster()) {
                log.info("检测到集群环境，跳过MinIO桶和目录检查");
                log.info("请确保MinIO桶 '{}' 和目录 'iceberg/' 已存在", storage.getBucket());
                return;
            }
            
            // Locally, first make sure the bucket itself exists (MinIO client).
            boolean bucketExists = top.reminis.utils.MinIOBucketManager.ensureBucketExists(
                storage.getEndpoint(), 
                storage.getAccessKey(), 
                storage.getSecretKey(), 
                storage.getBucket()
            );
            
            if (!bucketExists) {
                log.error("MinIO桶创建失败，无法继续执行");
                throw new RuntimeException("MinIO桶创建失败，作业终止");
            }
            
            // Then use a Hadoop FileSystem to make sure the warehouse directory exists.
            org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
            conf.set("fs.s3a.endpoint", storage.getEndpoint());
            conf.set("fs.s3a.access.key", storage.getAccessKey());
            conf.set("fs.s3a.secret.key", storage.getSecretKey());
            conf.set("fs.s3a.path.style.access", "true");
            conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
            conf.set("fs.s3a.connection.ssl.enabled", "false");
            conf.set("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
            
            String s3aPath = "s3a://" + storage.getBucket() + "/";
            java.net.URI uri = java.net.URI.create(s3aPath);
            // Fixed: try-with-resources guarantees the FileSystem is closed even
            // when the directory checks below throw (previously close() was only
            // reached on the happy path).
            try (org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(uri, conf)) {
                // Ensure the iceberg/ directory exists (matches the configured warehouse path).
                try {
                    String icebergPath = s3aPath + "iceberg/";
                    org.apache.hadoop.fs.Path icebergDirPath = new org.apache.hadoop.fs.Path(icebergPath);
                    if (fs.exists(icebergDirPath)) {
                        log.info("iceberg目录已存在");
                    } else {
                        log.info("iceberg目录不存在，尝试创建...");
                        boolean created = fs.mkdirs(icebergDirPath);
                        if (created) {
                            log.info("iceberg目录创建成功");
                        } else {
                            log.warn("iceberg目录创建失败，但继续执行");
                        }
                    }
                } catch (Exception e) {
                    log.warn("创建iceberg目录时发生错误: {}", e.getMessage());
                    // Fallback: creating and deleting a marker file can force the prefix into existence.
                    try {
                        String testPath = s3aPath + "iceberg/.test";
                        org.apache.hadoop.fs.Path testFile = new org.apache.hadoop.fs.Path(testPath);
                        fs.create(testFile, true).close();
                        fs.delete(testFile, false);
                        log.info("通过测试文件方式创建iceberg目录成功");
                    } catch (Exception e2) {
                        log.warn("通过测试文件方式创建iceberg目录也失败: {}", e2.getMessage());
                    }
                }
            }
            log.info("MinIO桶和目录检查完成");
            
        } catch (Exception e) {
            log.error("确保MinIO桶和目录存在时发生错误: {}", e.getMessage(), e);
            // In a cluster this check is advisory only; locally it is fatal.
            if (isRunningInCluster()) {
                log.warn("在集群环境中无法检查MinIO桶和目录，但作业仍可继续运行");
            } else {
                throw new RuntimeException("MinIO桶和目录检查失败，作业终止", e);
            }
        }
    }
    
    /**
     * Heuristically decides whether the job runs inside a Flink cluster.
     * Cluster is assumed when Flink JobManager/TaskManager system properties
     * are present, or when the Hadoop Configuration class is missing from the
     * classpath (the original heuristic); any other failure means local.
     *
     * @return true when a cluster deployment is assumed
     */
    private static boolean isRunningInCluster() {
        try {
            String jmAddress = System.getProperty("jobmanager.rpc.address");
            String tmAddress = System.getProperty("taskmanager.host");

            // Hadoop must be resolvable; a missing class is treated as cluster.
            Class.forName("org.apache.hadoop.conf.Configuration");

            return jmAddress != null || tmAddress != null;
        } catch (ClassNotFoundException e) {
            // Hadoop classes unavailable on the classpath → assume cluster.
            return true;
        } catch (Exception e) {
            // Anything else → assume local environment.
            return false;
        }
    }

    /**
     * Registers every configured source and sink table in the table
     * environment. Skipped entirely in cluster deployments, where the tables
     * are expected to be pre-created.
     *
     * @param tEnv the table environment to register into
     */
    private static void registerDataSourcesAndSinks(StreamTableEnvironment tEnv) {
        log.info("=== 5. 注册数据源和目标 ===");

        if (isRunningInCluster()) {
            log.info("检测到集群环境，跳过表创建");
            log.info("请确保在Flink集群中已预先创建好所需的表");
            log.info("===========================");
            return;
        }

        // Catalogs first, then the tables that live inside them.
        createCatalogs(tEnv);

        jobConfig.getDataSources().forEach(src -> createDataSourceTable(tEnv, src));
        jobConfig.getDataSinks().forEach(dst -> createDataSinkTable(tEnv, dst));

        log.info("所有数据源和目标注册完成。");
        log.info("===========================");
    }

    /**
     * Creates every configured catalog in the table environment and switches
     * to the one flagged as default. Skipped in cluster environments.
     *
     * @param tEnv the table environment to create catalogs in
     */
    private static void createCatalogs(StreamTableEnvironment tEnv) {
        // In a cluster the catalogs are expected to be pre-provisioned.
        boolean isClusterEnvironment = isRunningInCluster();
        if (isClusterEnvironment) {
            log.info("检测到集群环境，跳过Catalog创建");
            log.info("请确保在Flink集群中已预先配置好Catalog");
            return;
        }
        
        for (JobConfig.CatalogConfig catalog : jobConfig.getCatalogs()) {
            // Build the full property map for this catalog
            Map<String, String> catalogProperties = new HashMap<>();
            catalogProperties.putAll(catalog.getProperties());
            
            // Iceberg catalogs need a few mandatory properties on top
            if ("iceberg".equalsIgnoreCase(catalog.getType())) {
                // Plain URI without embedded credentials
                catalogProperties.put("uri", catalog.getUri());
                catalogProperties.put("warehouse", catalog.getWarehouse());
                
                // Credentials are passed as jdbc.user / jdbc.password (same
                // approach as RepertorySqlJob.java per the original note —
                // confirm the catalog implementation actually reads these keys)
                catalogProperties.put("jdbc.user", catalog.getUsername());
                catalogProperties.put("jdbc.password", catalog.getPassword());
            }
            
            String createCatalogSql = generateCreateCatalogSql(
                catalog.getName(), catalog.getType(), catalogProperties);
            
            // NOTE(review): this logs the full SQL including jdbc.password —
            // consider masking credentials before logging.
            log.info("创建Catalog SQL: {}", createCatalogSql);
            tEnv.executeSql(createCatalogSql);
            log.info("Catalog '{}' 已创建，类型: {}", catalog.getName(), catalog.getType());
            
            if (catalog.isDefault()) {
                tEnv.useCatalog(catalog.getName());
                log.info("已切换到默认Catalog: {}", catalog.getName());
            }
        }
    }

    /**
     * Creates one source table in the table environment from its configuration.
     *
     * @param tEnv   target table environment
     * @param source source configuration to turn into a CREATE TABLE statement
     */
    private static void createDataSourceTable(StreamTableEnvironment tEnv, JobConfig.DataSourceConfig source) {
        // The generator needs the current job config before producing SQL.
        ConfigurableSqlGenerator.setJobConfig(jobConfig);

        final String ddl = ConfigurableSqlGenerator.generateKafkaSourceTable(source);
        log.info("创建数据源表 SQL: {}", ddl);
        tEnv.executeSql(ddl);
        log.info("数据源表 '{}' 已创建，类型: {}", source.getName(), source.getType());
    }

    /**
     * Creates one sink table in the table environment; for Iceberg sinks the
     * existing table is dropped first so the fresh schema applies.
     *
     * @param tEnv target table environment
     * @param sink sink configuration to turn into a CREATE TABLE statement
     */
    private static void createDataSinkTable(StreamTableEnvironment tEnv, JobConfig.DataSinkConfig sink) {
        // The generator needs the current job config before producing SQL.
        ConfigurableSqlGenerator.setJobConfig(jobConfig);

        if ("iceberg".equalsIgnoreCase(sink.getType())) {
            dropExistingIcebergTable(tEnv, sink);
        }

        final String ddl = ConfigurableSqlGenerator.generateIcebergSinkTable(sink);
        log.info("创建数据目标表 SQL: {}", ddl);
        tEnv.executeSql(ddl);
        log.info("数据目标表 '{}' 已创建，类型: {}", sink.getName(), sink.getType());
    }

    /**
     * Drops the sink's Iceberg table if it exists; failures (e.g. the table
     * never existed) are logged as warnings and do not abort table creation.
     */
    private static void dropExistingIcebergTable(StreamTableEnvironment tEnv, JobConfig.DataSinkConfig sink) {
        String dropTableSql = "DROP TABLE IF EXISTS " + sink.getTableName();
        log.info("删除现有表 SQL: {}", dropTableSql);
        try {
            tEnv.executeSql(dropTableSql);
            log.info("表 '{}' 已删除", sink.getTableName());
        } catch (Exception e) {
            log.warn("删除表 '{}' 时发生错误（可能表不存在）: {}", sink.getTableName(), e.getMessage());
        }
    }

    /**
     * Builds one INSERT statement per configured data flow and submits them
     * all as a single statement set (asynchronous execution).
     *
     * @param tEnv table environment holding the registered tables
     */
    private static void executeDataProcessingJob(StreamTableEnvironment tEnv) {
        log.info("=== 6. 提交数据处理作业 ===");
        
        // Fixed: setting the job config on the generator is loop-invariant and
        // was previously repeated on every iteration.
        ConfigurableSqlGenerator.setJobConfig(jobConfig);

        StatementSet stmtSet = tEnv.createStatementSet();
        
        for (JobConfig.DataFlowConfig flow : jobConfig.getDataFlows()) {
            // DLQ flows use a dedicated SQL template; everything else is a
            // plain transformation.
            String insertSql = "dlq".equals(flow.getTransformationType())
                ? ConfigurableSqlGenerator.generateDlqSql(flow)
                : ConfigurableSqlGenerator.generateDataTransformationSql(flow);
            
            log.info("数据流处理 SQL: {}", insertSql);
            stmtSet.addInsertSql(insertSql);
            log.info("添加数据流处理: {} -> {}", flow.getSourceTable(), flow.getTargetTable());
        }
        
        stmtSet.execute();
        log.info("数据处理作业已提交，开始异步执行。");
    }

    /**
     * Starts a single-threaded daemon scheduler that periodically logs the job
     * status and monitoring statistics while the job is running. The first
     * report fires after 30 seconds; subsequent reports follow the configured
     * monitoring interval.
     */
    private static void startStatisticsMonitor() {
        log.info("=== 7. 启动监控线程 ===");
        
        scheduler = Executors.newSingleThreadScheduledExecutor(runnable -> {
            Thread worker = new Thread(runnable, "Universal-Job-Monitor");
            worker.setDaemon(true); // daemon so it never blocks JVM shutdown
            return worker;
        });

        Runnable statusReport = () -> {
            if (!isRunning) {
                return; // job is shutting down; skip the report
            }
            log.info("--- 作业状态检查 [时间: {}] ---", LocalDateTime.now());
            log.info("作业名称: {}", jobConfig.getJobName());
            log.info("作业描述: {}", jobConfig.getDescription());
            log.info("数据源数量: {}", jobConfig.getDataSources().size());
            log.info("数据目标数量: {}", jobConfig.getDataSinks().size());
            log.info("数据流数量: {}", jobConfig.getDataFlows().size());

            // Emit the accumulated monitoring counters
            UniversalJobMonitor.printStatistics();
            log.info("------------------------------------------");
        };
        scheduler.scheduleAtFixedRate(statusReport, 30, jobConfig.getMonitoringInterval(), TimeUnit.SECONDS);
        
        log.info("监控线程已启动。");
        log.info("============================");
    }

    /**
     * Registers a JVM shutdown hook that triggers graceful resource cleanup
     * when the process receives a termination signal.
     */
    private static void registerShutdownHook() {
        log.info("=== 8. 注册JVM关闭钩子 ===");
        Thread hook = new Thread(() -> {
            log.warn("接收到关闭信号，开始执行优雅停机...");
            cleanupAndExit(0);
        }, "Shutdown-Hook-Thread");
        Runtime.getRuntime().addShutdownHook(hook);
        log.info("关闭钩子注册成功。");
        log.info("=========================");
    }

    /**
     * Blocks the calling thread, polling once per second, until
     * {@code isRunning} is cleared by the shutdown path.
     *
     * @throws InterruptedException if the waiting thread is interrupted
     */
    private static void waitForTermination() throws InterruptedException {
        while (isRunning) {
            TimeUnit.SECONDS.sleep(1);
        }
    }

    /**
     * Stops the monitor scheduler, records job end and flips the running flag.
     *
     * NOTE(review): despite its name this method never calls System.exit — it
     * is also invoked from the JVM shutdown hook, where System.exit would
     * block. Callers must return or exit on their own afterwards; confirm the
     * status code should be surfaced somewhere beyond the log line.
     *
     * @param status intended exit status (currently only logged)
     */
    private static void cleanupAndExit(int status) {
        isRunning = false; // stops waitForTermination() and silences the monitor task
        log.info("开始清理资源...");
        if (scheduler != null && !scheduler.isShutdown()) {
            scheduler.shutdown();
            try {
                // Give in-flight monitor runs up to 5 seconds before forcing shutdown
                if (!scheduler.awaitTermination(5, TimeUnit.SECONDS)) {
                    scheduler.shutdownNow();
                }
            } catch (InterruptedException e) {
                scheduler.shutdownNow();
                Thread.currentThread().interrupt(); // preserve the interrupt status
            }
            log.info("监控调度器已关闭。");
        }
        UniversalJobMonitor.recordJobEnd();
        log.info("资源清理完成，准备退出，状态码: {}", status);
    }
}
