package com.atguigu.Flink.sql.connector;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demo: Flink SQL connectors. Generates rows with the 'datagen' connector and
 * writes them to a Kafka topic with the 'kafka' connector. A Kafka *source*
 * DDL is also shown for reference but is not executed in this job.
 */
public class Flink03_KafkaConnector {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Kafka Connector read (REFERENCE ONLY — intentionally not executed:
        // it defines a table also named t1 and would clash with the datagen
        // source below; executing it would additionally require a live Kafka
        // cluster with the listed brokers).
        // NOTE(review): with 'scan.startup.mode' = 'specific-offsets', the
        // 'properties.auto.offset.reset' consumer property is presumably
        // irrelevant here, since Flink seeks to the configured offsets —
        // confirm against the Kafka connector docs before relying on it.
        String createSourceTable =
                "CREATE TABLE t1 (\n" +
                        "  `id` STRING,\n" +
                        "  `vc` INT,\n" +
                        "  `ts` BIGINT\n" +
                        ") WITH (\n" +
                        "  'connector' = 'kafka',\n" +
                        "  'topic' = 'topicA',\n" +
                        "  'properties.bootstrap.servers' = 'hadoop102:9092,hadoop103:9092,hadoop104:9092',\n" +
                        "  'properties.group.id' = 'flink12',\n" +
                        "  'scan.startup.mode' = 'specific-offsets',\n" +
                        "  'properties.auto.offset.reset' = 'latest' ," +
                        "  'scan.startup.specific-offsets' = 'partition:0,offset:1210;partition:1,offset:1066;partition:2,offset:1366;partition:3,offset:1230' ," +
                        "  'format' = 'csv'\n" +
                        ")" ;

        // Actual source used by this job: a bounded datagen table producing
        // 100 rows/second — random ids/vc, sequential ts from 1 to 1e8.
        String createSourceSql =
                " create table t1 (" +
                        " id STRING , " +
                        " vc INT , " +
                        " ts BIGINT " +
                        " ) WITH (" +
                        " 'connector' = 'datagen' ," +
                        " 'rows-per-second' = '100' , " +
                        " 'number-of-rows' = '100000000' , " +
                        " 'fields.id.kind' = 'random' , " +
                        " 'fields.id.length' = '10' , " +
                        " 'fields.vc.kind' = 'random' , " +
                        " 'fields.vc.min' = '100', " +
                        " 'fields.vc.max' = '500', " +
                        " 'fields.ts.kind' = 'sequence' , " +
                        " 'fields.ts.start' = '1' , "  +
                        " 'fields.ts.end' = '100000000'"  +
                        " )" ;

        tableEnv.executeSql(createSourceSql);
        //tableEnv.sqlQuery("select id , vc , ts from t1 ").execute().print();

        // Register the projection of t1 under a second name so the INSERT
        // below can select from it.
        Table table = tableEnv.sqlQuery("select id,vc,ts from t1");
        tableEnv.createTemporaryView("t2", table);

        // Kafka Connector write
        String createSinkTable =
                "CREATE TABLE t3 (\n" +
                        "  `id` STRING,\n" +
                        "  `vc` INT,\n" +
                        "  `ts` BIGINT\n" +
                        ") WITH (\n" +
                        "  'connector' = 'kafka',\n" +
                        "  'topic' = 'topicA',\n" +
                        "  'properties.bootstrap.servers' = 'hadoop102:9092,hadoop103:9092,hadoop104:9092',\n" +
                        "  'sink.delivery-guarantee' = 'at-least-once' , " +
                        "  'format' = 'csv'\n" +
                        ")" ;

        tableEnv.executeSql(createSinkTable);

        // executeSql on an INSERT submits (and asynchronously runs) the job
        // itself. Do NOT also call env.execute(): this pipeline defines no
        // DataStream operators, so env.execute() would throw
        // IllegalStateException ("No operators defined in streaming topology")
        // and the original catch re-wrapped that as a spurious RuntimeException
        // after the real job had already been submitted successfully.
        tableEnv.executeSql("insert into t3 select * from t2");
    }
}
