package sql;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Groups the raw stream with a plain (non-windowed) GROUP BY.
 *
 * <p>Effect: maintains a running count per key over the entire stream;
 * the result is a retract (changelog) stream — each update for a key
 * retracts the previous count and emits the new one.
 */
public class D2_GROUP_COUNT {

  /**
   * Entry point: registers a Kafka source and a print sink, then runs a
   * global (non-windowed) GROUP BY count that produces a retract stream.
   *
   * @param args unused
   * @throws Exception if the streaming insert job fails or the wait is interrupted
   */
  public static void main(String[] args) throws Exception {

    // Pin the Flink Web UI to a fixed port for local debugging.
    Configuration configuration = new Configuration();
    configuration.setString("rest.port", "9091");
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
    env.setParallelism(1);
    StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

    // Kafka source: event time derived from the `stime` string column,
    // with a 30-second out-of-orderness watermark.
    String genSql = "CREATE TABLE ods_tb ( " +
        " stime STRING," +
        " name STRING," +
        " id BIGINT," +
        " rowtime AS to_timestamp(stime)," +
        " WATERMARK FOR rowtime AS rowtime - interval '30' second" +
        ") WITH ( " +
        "  'connector' = 'kafka',\n" +
        "  'topic' = 'test',\n" +
        "  'properties.bootstrap.servers' = 'kafka:9092',\n" +
        "  'properties.group.id' = 'testGroup',\n" +
        "  'scan.startup.mode' = 'latest-offset',\n" +
        "  'format' = 'json'" +
        ")";

    // Print sink: writes the retract stream (+I/-U/+U rows) to stdout.
    String print = "CREATE TABLE sink_print (" +"    " +
        "    name STRING, " +
        "    cnt BIGINT " +
        ") WITH (" +
        "     'connector' = 'print'" +
        ")";

    // Non-windowed aggregation over the whole stream: running count per name.
    String sql = "INSERT INTO sink_print " +
        " SELECT name,count(1) as cnt FROM ods_tb" +
        " GROUP BY name";

    tableEnv.executeSql(genSql); // DDL: register source (synchronous)
    tableEnv.executeSql(print);  // DDL: register sink (synchronous)

    // executeSql(INSERT ...) submits the streaming job asynchronously and
    // returns immediately. Without await(), main() exits and the local
    // mini-cluster may shut down, cancelling the job before it produces
    // any output — so block until the job terminates.
    tableEnv.executeSql(sql).await();
  }
}
