package com.atguigu.flinksql;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL demo: reads sensor records (id, ts, vc) in CSV format from Kafka
 * topic {@code s1}, filters for {@code id = 'sensor1'}, and writes the matching
 * rows back to Kafka topic {@code s2}.
 *
 * <p>The job is submitted entirely through {@code executeSql}; the INSERT
 * statement launches its own job, so no {@code env.execute()} call is needed.
 */
public class Flink06_SQL_Kafka {
    public static void main(String[] args) {
        // Pin the Flink web UI to port 10000 for this local run.
        Configuration configuration = new Configuration();
        configuration.setInteger("rest.port", 10000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(2);

        // Create the table environment on top of the streaming environment.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Source table: maps Kafka topic 's1' (CSV) onto a 'sensor' table.
        // 'latest-offset' means only records produced after job start are read.
        tEnv.executeSql("create table sensor(" +
                " id string, " +
                " ts bigint, " +
                " vc int " +
                ") with (" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 's1',\n" +
                "  'properties.bootstrap.servers' = 'hadoop162:9092',\n" +
                "  'properties.group.id' = 'testGroup',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'csv')");

        // Ad-hoc query for debugging (prints the source stream to stdout):
        //tEnv.executeSql("select * from sensor ").print();

        // Sink table: writes rows to Kafka topic 's2' in CSV, distributing
        // records across partitions round-robin.
        tEnv.executeSql("create table s_out(" +
                " id string, " +
                " ts bigint, " +
                " vc int " +
                ") with (" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 's2',\n" +
                "  'properties.bootstrap.servers' = 'hadoop162:9092',\n" +
                "  'sink.partitioner' = 'round-robin',\n" +
                "  'format' = 'csv')");

        // Forward only 'sensor1' records from s1 to s2; this INSERT submits
        // the streaming job asynchronously.
        tEnv.executeSql(" insert into s_out select * from sensor where id = 'sensor1' ");
    }
}
