package sql.d20241107;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Array expansion example.
 *
 * <p>Demonstrates how Flink SQL turns an array-typed column into multiple
 * rows, one per element, using {@code CROSS JOIN UNNEST}.
 */
public class C1_join_array_expansion {

  /** DDL for the Kafka-backed source table carrying an {@code ARRAY<STRING>} column. */
  private static final String SOURCE_TABLE_DDL =
      "CREATE TABLE ods_tb1 ( " +
          " id BIGINT," +
          " cities ARRAY<STRING>" +
          ") WITH ( " +
          "  'connector' = 'kafka'," +
          "  'topic' = 'test1'," +
          "  'properties.bootstrap.servers' = 'kafka:9092'," +
          "  'properties.group.id' = 'JoinTest4'," +
          "  'scan.startup.mode' = 'latest-offset'," +
          "  'format' = 'json'" +
          ")";

  /** View that expands each element of {@code cities} into its own row. */
  private static final String EXPANSION_VIEW_DDL =
      "CREATE VIEW res AS " +
          " SELECT " +
          " id, cities, t.city " +
          " FROM ods_tb1" +
          " CROSS JOIN UNNEST(cities) AS t(city)";

  public static void main(String[] args) throws InterruptedException {
    StreamTableEnvironment tEnv = createTableEnvironment();

    // Register the Kafka source and the expansion view, then inspect and run it.
    tEnv.executeSql(SOURCE_TABLE_DDL);
    tEnv.executeSql(EXPANSION_VIEW_DDL);

    tEnv.executeSql("desc res").print();
    tEnv.executeSql("SELECT * FROM res").print();
  }

  /**
   * Builds a single-parallelism streaming table environment with
   * checkpointing every 2 seconds to a local filesystem directory.
   */
  private static StreamTableEnvironment createTableEnvironment() {
    Configuration conf = new Configuration();
    conf.setString("rest.port","9093"); // local Flink web UI port

    StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment(conf);
    streamEnv.setParallelism(1);

    streamEnv.enableCheckpointing(2000);
    CheckpointConfig ckpConf = streamEnv.getCheckpointConfig();
    ckpConf.setMaxConcurrentCheckpoints(1);
    ckpConf.setCheckpointStorage("file:///flink_ckp");

    return StreamTableEnvironment.create(streamEnv);
  }
}

