package com.atguigu.gmall.realtime.join;

import com.atguigu.gmall.realtime.app.BaseSQLApp;
import com.atguigu.gmall.realtime.util.SQLUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Author lzc
 * @Date 2023/4/23 10:39
 */
/**
 * Demo: regular (inner) stream-stream join in Flink SQL, writing the joined
 * result to a Kafka sink.
 *
 * <p>Reads two CSV-encoded Kafka topics ({@code InnerJoinDemo1},
 * {@code InnerJoinDemo2}), inner-joins them on {@code id}, and emits the
 * joined rows as JSON to topic {@code ab}.
 */
public class InnerJoinToKafka extends BaseSQLApp {
    public static void main(String[] args) {
        // init(checkpointIntervalMs, parallelism, jobName) — signature defined
        // in BaseSQLApp (outside this file).
        new InnerJoinToKafka().init(
            50000,
            2,
            "InnerJoinDemo"
        );
    }

    @Override
    protected void handle(StreamExecutionEnvironment env, StreamTableEnvironment tEnv) {
        // Left input: Kafka topic "a", CSV rows of (id, name).
        tEnv.executeSql("create table a(" +
                            " id string, " +
                            " name string " +
                            ")" + SQLUtil.getKafkaSourceDDL("a", "InnerJoinDemo1", "csv"));

        // Right input: Kafka topic "b", CSV rows of (id, age).
        tEnv.executeSql("create table b(" +
                            " id string, " +
                            " age int " +
                            ")" + SQLUtil.getKafkaSourceDDL("b", "InnerJoinDemo2", "csv"));

        // NOTE(review): a regular join keeps ALL rows of both inputs in state
        // forever by default, which grows without bound on unbounded streams.
        // A state TTL should be configured per business requirements, e.g.
        // tEnv.getConfig().setIdleStateRetention(Duration.ofHours(...)).
        // "name" and "age" are left unqualified below because each appears in
        // only one input table, so the reference is unambiguous.
        Table result = tEnv.sqlQuery("select " +
                                        " a.id id," +
                                        " name, " +
                                        " age " +
                                        "from a " +
                                        "join b on a.id=b.id");

        // Sink: Kafka topic "ab", JSON-encoded joined rows.
        tEnv.executeSql("create table ab(" +
                            "  id string, " +
                            "  name string, " +
                            "  age int " +
                            ")" + SQLUtil.getKafkaSinkDDL("ab", "json"));

        // Equivalent to "insert into ab select ... " — submits the streaming
        // insert job for the joined result.
        result.executeInsert("ab");
    }
}
