package com.atguigu.chapter11;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/6/19 9:26
 */
/**
 * Flink SQL job that pipes data between two Kafka topics:
 * reads CSV records (id, ts, vc) from topic {@code s1} and writes the
 * projection (id, vc) as CSV to topic {@code s2}.
 */
public class Flink05_SQL_Kafka_Kafka {
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        
        // 2. Create the streaming table environment on top of it
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    
        // 3. Declare the dynamic source table bound to Kafka topic 's1'
        tEnv.executeSql("create table s1(" +
                            "   id string, " +
                            "   ts bigint, " +
                            "   vc int " +
                            ")with(" +
                            "   'connector' = 'kafka', " +
                            "   'properties.bootstrap.servers' = 'hadoop162:9092', " +
                            "   'properties.group.id' = 'Flink05_SQL_Kafka_Kafka', " +
                            "   'topic' = 's1', " +
                            "   'scan.startup.mode' = 'latest-offset', " +
                            "   'format' = 'csv' " +
                            ")");
        // 4. Declare the dynamic sink table bound to Kafka topic 's2'
        tEnv.executeSql("create table s2(" +
                            "   id string, " +
                            "   vc int " +
                            ")with(" +
                            "   'connector' = 'kafka', " +
                            "   'properties.bootstrap.servers' = 'hadoop162:9092', " +
                            "   'topic' = 's2', " +
                            "   'format' = 'csv', " +
                            "   'sink.partitioner' = 'round-robin'"  +
                            ")");
    
        // 5. Submit the continuous INSERT job and block on it.
        //    Without await() the client JVM exits right after submission,
        //    which tears down a local mini-cluster before the streaming
        //    job can process anything.
        tEnv.executeSql("insert into s2 select id, vc from s1 ").await();
    }
}
