package sql.d20241107;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * 常规join
 *
 *  inner join:
 *    L表每来一条数据都会和 R表所有历史数据join, 有join到则输出，没有join到则不输出
 *    R表每来一条数据都会和L表所有历史数据join，有join到则输出，没有join到则不输出
 *
 *  left join
 *    L表每来一条数据都会和R表所有历史数据join:
 *      有join到则输出 +I[L,R]
 *      没有join到输出 +I[L, NULL], 当R表有新数据来并且join到了则输出 +D[L, NULL] and +I[L, R]
 *    R表每来一条数据都会和L表所有历史数据join:
 *      有join到则输出 +I[L,R]
 *      没有join到则不输出
 *
 *  right join和left join正好相反
 *
 *  full join 就比较好理解了
 *    当没有关联的也进行输出，无论L还是R表
 *
 *
 *  注意事项：
 *    常规join在无界流上执行，状态会持续增大，因此需要注意配置状态的过期清理（state TTL）
 *
 *
 */
public class C1_Join {

  /**
   * Demo entry point: builds a local Flink environment, registers two Kafka-backed
   * tables, left-joins them on {@code id}, and prints the continuously updating result.
   *
   * <p>Note: {@code throws InterruptedException} was removed from the signature —
   * nothing in this body throws it.
   */
  public static void main(String[] args) {

    // Pin the REST port so the local Flink Web UI is reachable at http://localhost:9093.
    Configuration flinkConf = new Configuration();
    flinkConf.setString("rest.port", "9093");
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(flinkConf);
    // Parallelism 1 keeps console output ordered, which makes the +I/-D changelog
    // rows of the regular join easy to follow.
    env.setParallelism(1);

    // Checkpoint every 2s into a local file-system path; at most one in flight.
    env.enableCheckpointing(2000);
    CheckpointConfig checkpointConfig = env.getCheckpointConfig();
    checkpointConfig.setMaxConcurrentCheckpoints(1);
    checkpointConfig.setCheckpointStorage("file:///flink_ckp");

    StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

    // NOTE(review): a regular join over unbounded streams accumulates state for every
    // row of both inputs, forever. For anything beyond a demo, configure state expiry,
    // e.g. tableEnv.getConfig().set("table.exec.state.ttl", "1 h") — left out here so
    // the join semantics shown in the class Javadoc are not affected by expiring rows.

    // Left input: CSV records (id BIGINT, name STRING) from Kafka topic 'test1'.
    String ods1 = "CREATE TABLE ods_tb1 ( " +
        " id BIGINT," +
        " name STRING" +
        ") WITH ( " +
        "  'connector' = 'kafka'," +
        "  'topic' = 'test1'," +
        "  'properties.bootstrap.servers' = 'kafka:9092'," +
        "  'properties.group.id' = 'JoinTest4'," +
        "  'scan.startup.mode' = 'latest-offset'," +
        "  'format' = 'csv'" +
        ")";

    // Right input: same schema from topic 'test2'. The larger fetch size here is a
    // consumer tuning knob; it does not change join behavior.
    String ods2 = "CREATE TABLE ods_tb2 ( " +
        " id BIGINT," +
        " name STRING" +
        ") WITH ( " +
        "  'connector' = 'kafka'," +
        "  'topic' = 'test2'," +
        "  'properties.bootstrap.servers' = 'kafka:9092'," +
        "  'properties.group.id' = 'JoinTest4'," +
        "  'properties.max.partition.fetch.bytes' = '10485760'," +
        "  'scan.startup.mode' = 'latest-offset'," +
        "  'format' = 'csv'" +
        ")";

    // Regular LEFT JOIN: unmatched left rows emit +I[L, NULL] immediately and are
    // retracted (-D) then re-emitted (+I[L, R]) once a matching right row arrives.
    String joinRes = "CREATE VIEW res AS " +
        " SELECT t1.id,t1.name nn1, t2.name as nn2 FROM ods_tb1 t1 " +
        " LEFT JOIN " +
        " ods_tb2 t2 " +
        " on t1.id = t2.id ";

    tableEnv.executeSql(ods1);
    tableEnv.executeSql(ods2);
    tableEnv.executeSql(joinRes);

    // Print the view's schema, then stream its changelog to stdout (blocks until
    // the job is cancelled).
    tableEnv.executeSql("desc res").print();
    tableEnv.executeSql("SELECT * FROM res").print();
  }
}
