import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * Flink SQL join examples: a regular (inner) join, a left join with idle-state
 * retention, and writing the changelog result to an upsert-kafka sink.
 *
 * @author lzc
 * @date 2022/5/21 10:17
 */
public class Join_1 {
    public static void main(String[] args) {
        // Pin the local Flink web UI to a fixed port so it is easy to find while testing.
        Configuration webConf = new Configuration();
        webConf.setInteger("rest.port", 2000);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(webConf);
        env.setParallelism(1);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // How long idle join state is kept in memory before it is cleared.
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));

        // Source table t1(id, name): CSV records from Kafka topic 't1'.
        tableEnv.executeSql("create table t1(" +
                "id string, " +
                "name string" +
                ")with(" +
                " 'connector' = 'kafka', " +
                " 'properties.bootstrap.servers' = 'hadoop162:9092', " +
                " 'properties.group.id' = 'atguigu', " +
                " 'topic' = 't1', " +
                " 'format' = 'csv', " +
                " 'scan.startup.mode' = 'latest-offset'" +
                ")");

        // Source table t2(id, age): CSV records from Kafka topic 't2'.
        tableEnv.executeSql("create table t2(" +
                "id string, " +
                "age int " +
                ")with(" +
                " 'connector' = 'kafka', " +
                " 'properties.bootstrap.servers' = 'hadoop162:9092', " +
                " 'properties.group.id' = 'atguigu', " +
                " 'topic' = 't2', " +
                " 'format' = 'csv', " +
                " 'scan.startup.mode' = 'latest-offset'" +
                ")");

        // 1. inner join (kept for reference; uncomment to print results directly)
        /*tableEnv.sqlQuery("select " +
                " t1.id id1, " +
                " t2.id id2, " +
                " t1.name, " +
                " t2.age " +
                "from t1 " +
                "join t2 on t1.id=t2.id")
            .execute()
            .print();*/

        // 2. left join
        // With a left join, a left-side row is only dropped from state after it
        // has gone 10s (the idle-state retention) without finding a match.
        Table joined = tableEnv.sqlQuery("select " +
                " t1.id id1, " +
                " t1.name, " +
                " t2.id id2, " +
                " t2.age " +
                "from t1 " +
                "left join t2 on t1.id=t2.id");

        // Sink table: upsert-kafka, keyed by id1, so the left join's update
        // stream is materialized as upserts in topic 't12'.
        tableEnv.executeSql("create table t12(" +
                "id1 string, " +
                "name string, " +
                "id2 string, " +
                "age int, " +
                "primary key(id1)not enforced" +
                ")with(" +
                " 'connector' = 'upsert-kafka', " +
                " 'properties.bootstrap.servers' = 'hadoop162:9092', " +
                " 'topic' = 't12', " +
                " 'key.format' = 'json', " +
                " 'value.format' = 'json' " +
                ")");

        // executeInsert both registers the INSERT and submits the streaming job.
        joined.executeInsert("t12");

        // 3. interval join

        // 4. lookup join
    }
}
