package com.atguigu.gmall.realtime.join;

import com.atguigu.gmall.realtime.app.BaseSQLApp;
import com.atguigu.gmall.realtime.util.SQLUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * @Author lzc
 * @Date 2023/8/2 09:08
 */
/**
 * Demonstrates a regular (unbounded) inner join in Flink SQL over two
 * Kafka-backed tables, with idle-state retention (TTL) configured so the
 * join state does not grow forever.
 */
public class InnerJoin extends BaseSQLApp {
    public static void main(String[] args) {
        new InnerJoin().start(
            5555,
            2,
            "InnerJoin"
        );
    }

    /**
     * Registers two CSV-format Kafka source tables and prints the result of an
     * inner join between them.
     *
     * @param env  the streaming execution environment (unused here; SQL only)
     * @param tEnv the table environment used to register tables and run queries
     */
    @Override
    public void handle(StreamExecutionEnvironment env,
                       StreamTableEnvironment tEnv) {
        // For regular joins, by default ALL rows of both tables are kept in
        // state (memory) forever. Always set an idle-state TTL appropriate to
        // the business relationship between the two tables.
        tEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(20));

        tEnv.executeSql("create table t1(" +
                            " id string, " +
                            " name string " +
                            ")" + SQLUtil.getKafkaSourceSQL("InnerJoin", "t1", "csv"));

        tEnv.executeSql("create table t2(" +
                            " id string, " +
                            " age int " +
                            ")" + SQLUtil.getKafkaSourceSQL("InnerJoin", "t2", "csv"));

        // Implicit inner join (comma + WHERE) — equivalent to
        // "from t1 join t2 on t1.id = t2.id".
        tEnv.sqlQuery("select " +
                          " t1.id, " +
                          " name, " +
                          " age " +
                          "from t1, t2 where t1.id=t2.id")
            .execute()
            .print();
    }
}
