
import bean.Bean1;
import bean.Bean2;
import com.zhang.gmall.utils.KafkaUtil;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * Demo job: registers an upsert-kafka table (topic {@code kafka_test}) and
 * prints its changelog stream (+I / -U / +U / -D rows) to stdout.
 *
 * <p>Connector options for the table come from
 * {@code KafkaUtil.getUpsertKafkaDDL("kafka_test")}; the primary key
 * {@code id1} makes the source emit upsert semantics.
 */
public class FlinkSQLJoinTest2 {

    public static void main(String[] args) throws Exception {

        // Local environment; parallelism 1 keeps the printed changelog ordered.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Register the upsert-kafka source table. PRIMARY KEY ... NOT ENFORCED
        // is required by the upsert-kafka connector to key the changelog.
        String ddl = "create table upsert_kafka_test( "
                + "    `id1` String, "
                + "    `id2` String, "
                + "    `name` String, "
                + "    `sex` String, "
                + "    PRIMARY KEY (id1) NOT ENFORCED "
                + ")"
                + KafkaUtil.getUpsertKafkaDDL("kafka_test");
        tableEnv.executeSql(ddl);

        // Read the table back and print the resulting changelog stream.
        Table result = tableEnv.sqlQuery("select * from upsert_kafka_test");
        tableEnv.toChangelogStream(result).print();

        env.execute();
    }

}
