package com.shujia.flink.sql;

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

/**
 * Streaming word-count demo written entirely in Flink SQL.
 *
 * <p>Reads '|'-delimited CSV records from the Kafka topic {@code lines},
 * splits each line on ',' using the Hive built-in functions
 * {@code explode}/{@code split}, aggregates a running count per word, and
 * writes the results to stdout via the {@code print} connector.
 */
public class Demo1WordCount {
    public static void main(String[] args) throws Exception {
        // 1. Create the Flink SQL execution environment.
        //    First build the settings (configuration) object.
        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .inStreamingMode()// streaming mode
                //.inBatchMode()// batch mode
                .build();

        // Create the table environment from the settings.
        TableEnvironment tEnv = TableEnvironment.create(settings);

        // executeSql runs exactly one SQL statement per call.
        // 1. Create the source table (Kafka).
        tEnv.executeSql("CREATE TABLE lines (\n" +
                "    line STRING\n" +
                ") WITH (\n" +
                "    'connector' = 'kafka',-- 数据源类型\n" +
                "    'topic' = 'lines',-- topic\n" +
                "    'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',-- broker\n" +
                "    'properties.group.id' = 'testGroup',-- 消费者组\n" +
                "    'scan.startup.mode' = 'earliest-offset', -- 读取数据的位置\n" +
                "    'format' = 'csv', -- 读取数据的格式\n" +
                "    'csv.field-delimiter'= '|'\n" +
                ")");

        // 2. Create the sink table (print connector: rows go to stdout).
        tEnv.executeSql("CREATE TABLE print_table (\n" +
                "    word STRING,\n" +
                "    num BIGINT\n" +
                ") WITH (\n" +
                "     'connector' = 'print'\n" +
                ")");

        // Load the Hive module so the Hive built-in functions used below
        // (explode, split) can be resolved.
        tEnv.executeSql("LOAD MODULE hive WITH ('hive-version' = '3.1.2')");

        // 3. Submit the INSERT job. executeSql submits INSERT statements
        //    asynchronously and returns immediately; without await() the
        //    client JVM would exit (tearing down a local mini-cluster)
        //    before the streaming job produces any output. For this
        //    unbounded streaming demo, await() keeps the client running.
        tEnv.executeSql("insert into print_table\n" +
                "select word,count(1) as num\n" +
                "from \n" +
                "lines,\n" +
                "LATERAL TABLE(explode(split(line,','))) t(word)\n" +
                "group by word").await();

    }
}
