package com.atguigu.gmall.realtime.joindemo;

import com.atguigu.gmall.realtime.app.BaseSQLApp;
import com.atguigu.gmall.realtime.util.SQLUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * Demo: a regular (unbounded) INNER JOIN in Flink SQL between two Kafka-backed
 * source tables, with the joined result written back out to a Kafka sink table.
 *
 * @Author lzc
 * @Date 2023/2/13 10:45
 */
public class InnerJoinToKafkaDemo extends BaseSQLApp {
    public static void main(String[] args) {
        new InnerJoinToKafkaDemo().init(6000, 2, "InnerJoinDemo");
    }

    @Override
    public void handle(StreamExecutionEnvironment env, StreamTableEnvironment tEnv) {
        // A regular join keeps every incoming row in state. Cap idle-state
        // retention at 30s so entries unused for 30s are purged automatically
        // and state cannot grow without bound.
        tEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(30));

        // Left source table: (id, name) read from Kafka as CSV.
        String leftSourceDdl =
            "create table t1(id string, name string)"
                + SQLUtil.getDDLKafkaSource("t1", "atguigu", "csv");
        tEnv.executeSql(leftSourceDdl);

        // Right source table: (id, age) read from Kafka as CSV.
        String rightSourceDdl =
            "create table t2(id string, age int)"
                + SQLUtil.getDDLKafkaSource("t2", "atguigu", "csv");
        tEnv.executeSql(rightSourceDdl);

        // Inner join on id: emits a row only when both sides have a match.
        String joinQuery =
            "select t1.id id, name, age from t1 join t2 on t1.id=t2.id";
        Table joined = tEnv.sqlQuery(joinQuery);

        // Sink table: joined rows serialized as JSON into Kafka.
        String sinkDdl =
            "create table t_inner("
                + " id string, "
                + " name string, "
                + " age int"
                + ")"
                + SQLUtil.getDDLKafkaSink("t_inner", "json");
        tEnv.executeSql(sinkDdl);

        // Stream the join result into the sink table.
        joined.executeInsert("t_inner");
    }
}
/*
DataStream-API joins:
    window join

    interval join

Flink SQL:

Regular join
1. By default, rows from both input tables are kept in state (in memory) forever.
2. To prevent the state from growing until the job runs out of memory (OOM),
   always configure a state TTL (idle state retention).

 */