package com.atguigu;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
/**
 * Flink SQL example: joins a MySQL CDC changelog stream with a Kafka topic
 * on the {@code name} column and reports the maximum {@code buy_time} per name.
 *
 * <p>Expects a local MySQL instance exposing {@code test.my_table} and a local
 * Kafka broker serving JSON records on {@code my_topic}.
 */
public class FlinkTableJoinExample2 {
    public static void main(String[] args) throws Exception {
        // Bootstrap the streaming runtime and a Blink-planner table environment.
        final StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        final EnvironmentSettings envSettings =
                EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        final StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, envSettings);

        // Register the MySQL table as a CDC-backed source (streams row changes).
        final String mysqlDdl = String.join("\n",
                "CREATE TABLE mysql_source (",
                "  id INT,",
                "  name STRING,",
                "  age INT,",
                "  buy_time TIMESTAMP(3)",
                ") WITH (",
                "  'connector' = 'mysql-cdc',",
                "  'hostname' = 'localhost',",
                "  'port' = '3306',",
                "  'username' = 'root',",
                "  'password' = 'root',",
                "  'database-name' = 'test',",
                "  'table-name' = 'my_table'",
                ")");
        tEnv.executeSql(mysqlDdl);

        // Register the Kafka topic as a JSON-formatted streaming source.
        final String kafkaDdl = String.join("\n",
                "CREATE TABLE kafka_source (",
                "  name STRING,",
                "  pay_time TIMESTAMP(3)",
                ") WITH (",
                "  'connector' = 'kafka',",
                "  'topic' = 'my_topic',",
                "  'properties.bootstrap.servers' = 'localhost:9092',",
                "  'properties.group.id' = 'my_group',",
                "  'format' = 'json'",
                ")");
        tEnv.executeSql(kafkaDdl);

        // Regular streaming join on name, then aggregate the latest buy_time.
        final String joinSql = String.join("\n",
                "SELECT s.name, MAX(s.buy_time) AS max_buy_time",
                "FROM mysql_source s JOIN kafka_source k ON s.name = k.name",
                "GROUP BY s.name");
        final Table joined = tEnv.sqlQuery(joinSql);

        // Trigger the job and stream the (continuously updating) result to stdout.
        joined.execute().print();
    }
}