package com.example.sparksubmitter.service;

import org.apache.flink.client.deployment.ClusterDeploymentException;
import org.apache.flink.client.deployment.ClusterSpecification;
import org.apache.flink.client.deployment.application.ApplicationConfiguration;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.ClusterClientProvider;
import org.apache.flink.configuration.*;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.yarn.YarnClientYarnClusterInformationRetriever;
import org.apache.flink.yarn.YarnClusterDescriptor;
import org.apache.flink.yarn.YarnClusterInformationRetriever;
import org.apache.flink.yarn.configuration.YarnConfigOptions;
import org.apache.flink.yarn.configuration.YarnDeploymentTarget;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;

import java.io.File;
import java.net.MalformedURLException;
import java.net.URI;
import java.util.Collections;

import static org.apache.flink.configuration.MemorySize.MemoryUnit.MEGA_BYTES;

/**
 * Manual integration tests for submitting Flink jobs to a YARN cluster.
 *
 * <p>{@link #flinkJar()} deploys a packaged jar in YARN Application mode;
 * {@link #flinkSql()} runs a small bounded Flink SQL pipeline locally.
 *
 * <p>NOTE(review): these tests require a reachable YARN cluster and local
 * Flink/Hadoop installations at the hard-coded Windows paths below — they are
 * environment-dependent smoke tests, not unit tests.
 */
public class FlinkService {

    /**
     * Deploys the bundled WordCount jar to YARN in <em>Application mode</em>
     * (detached) and prints the resulting YARN application id.
     *
     * @throws RuntimeException wrapping {@link MalformedURLException} (bad local
     *     jar paths) or {@link ClusterDeploymentException} (YARN deployment failure)
     */
    @Test
    public void flinkJar() {
        try {
            // Hadoop environment: local install dir, config dir, and the user
            // the job is submitted as. Adjust for your environment.
            System.setProperty("hadoop.home.dir", "D:\\hadoop-3.3.4");
            System.setProperty("HADOOP_CONF_DIR", "D:\\hadoop_conf");
            System.setProperty("HADOOP_USER_NAME", "hadoop");

            // 1. Flink configuration for this submission.
            Configuration flinkConfig = new Configuration();

            // 2. YARN-specific settings: app name, queue, priority, detached mode.
            flinkConfig.setString(YarnConfigOptions.APPLICATION_NAME, "wangz_test_flink_sql");
            flinkConfig.setString(YarnConfigOptions.APPLICATION_QUEUE, "default");
            flinkConfig.setInteger(YarnConfigOptions.APPLICATION_PRIORITY, 1);
            flinkConfig.setBoolean(DeploymentOptions.ATTACHED, false); // detached: submit and return

            URI distJarUri = new File("D:\\flink-1.15.3\\lib\\flink-dist-1.15.3.jar").toURI();
            URI jarUri = new File("D:\\flink-1.15.3\\examples\\streaming\\WordCount.jar").toURI();

            // flink-dist jar shipped to the cluster, and the user jar to run.
            flinkConfig.set(YarnConfigOptions.FLINK_DIST_JAR, distJarUri.toURL().toString());
            flinkConfig.set(PipelineOptions.JARS, Collections.singletonList(jarUri.toURL().toString()));

            // Deploy in YARN *Application mode* (the user main() runs on the
            // JobManager). This is NOT per-job mode, despite earlier comments.
            flinkConfig.set(DeploymentOptions.TARGET, YarnDeploymentTarget.APPLICATION.getName());

            // NOTE(review): 1028/1096 MB look like typos for 1024 — kept as-is
            // to preserve behavior; confirm the intended sizes.
            flinkConfig.set(JobManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.parse("1028", MEGA_BYTES));
            flinkConfig.set(TaskManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.parse("1096", MEGA_BYTES));

            // 3. Hadoop/YARN configuration (picked up from HADOOP_CONF_DIR;
            //    extra *-site.xml resources could be added here if needed).
            YarnConfiguration yarnConfig = new YarnConfiguration();
            yarnConfig.set("hadoop.security.authentication", "simple");

            // 4. YARN client used to talk to the ResourceManager.
            YarnClient yarnClient = YarnClient.createYarnClient();
            yarnClient.init(yarnConfig);
            yarnClient.start();

            // 5. Cluster descriptor. sharedYarnClient=false: the descriptor owns
            //    yarnClient and closes it when the descriptor is closed, so
            //    try-with-resources below also releases the YARN client.
            YarnClusterInformationRetriever yarnRetriever =
                    YarnClientYarnClusterInformationRetriever.create(yarnClient);

            try (YarnClusterDescriptor clusterDescriptor = new YarnClusterDescriptor(
                    flinkConfig,
                    yarnConfig,
                    yarnClient,
                    yarnRetriever,
                    false)) {

                // 6. Cluster sizing. Memory/slots come from the Configuration
                //    options set above; builder defaults are used otherwise.
                ClusterSpecification clusterSpec =
                        new ClusterSpecification.ClusterSpecificationBuilder()
                                .createClusterSpecification();

                // 7. Program arguments for the user jar. Main class is null, so
                //    Flink uses the Main-Class from the jar's manifest.
                String[] appArgs = new String[]{"--input", "hdfs:///input/text.txt",
                        "--output", "hdfs:///output/wordcount"};
                ApplicationConfiguration appConfig = new ApplicationConfiguration(
                        appArgs,
                        null);

                // 8. Submit the application cluster to YARN.
                ClusterClientProvider<ApplicationId> clusterClientProvider =
                        clusterDescriptor.deployApplicationCluster(clusterSpec, appConfig);

                // 9. Report the YARN application id, closing the client even if
                //    printing fails.
                try (ClusterClient<ApplicationId> clusterClient =
                             clusterClientProvider.getClusterClient()) {
                    ApplicationId applicationId = clusterClient.getClusterId();
                    System.out.println("Application ID: " + applicationId);
                }
            }
        } catch (MalformedURLException | ClusterDeploymentException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Runs a small bounded Flink SQL pipeline in-process: a datagen source
     * producing ids 1..100 is filtered and written to a print sink.
     *
     * @throws RuntimeException wrapping any failure/interruption while waiting
     *     for the INSERT job to finish
     */
    @Test
    public void flinkSql() {

        // Streaming-mode TableEnvironment (Blink planner is the only planner
        // in Flink 1.15, so no explicit planner selection is needed).
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);

        // Source: datagen emits one row/second with a bounded id sequence
        // (1..100), so the job terminates on its own.
        tEnv.executeSql("CREATE TABLE source_table (" +
                "    id INT," +
                "    name STRING," +
                "    age INT" +
                ") WITH (" +
                "    'connector' = 'datagen'," +
                "    'rows-per-second' = '1'," +
                "    'fields.id.kind' = 'sequence'," +
                "    'fields.id.start' = '1'," +
                "    'fields.id.end' = '100'" +
                ")");

        // Sink: print rows to stdout.
        tEnv.executeSql("CREATE TABLE sink_table (" +
                "    id INT," +
                "    name STRING," +
                "    age INT" +
                ") WITH (" +
                "    'connector' = 'print'" +
                ")");

        // executeSql(INSERT ...) submits the job asynchronously; without
        // await() this test method would return (and the JVM could exit)
        // before the bounded job produces any output. await() is safe here
        // because the source sequence is finite.
        try {
            tEnv.executeSql(
                    "INSERT INTO sink_table SELECT id, name, age FROM source_table WHERE age > 18")
                    .await();
        } catch (Exception e) {
            throw new RuntimeException("Flink SQL insert job failed", e);
        }
    }
}

