from pyflink.table import EnvironmentSettings, TableEnvironment

# 1. Create the streaming table environment.
# All subsequent DDL/DML statements are executed through `table_env`.
settings = EnvironmentSettings.in_streaming_mode()
table_env = TableEnvironment.create(settings)

# 2. Create the Kafka source table.
# Consumes JSON student records from the 'students' topic, starting at the
# earliest offset; 'json.ignore-parse-errors' = 'true' drops malformed
# records instead of failing the job.
table_env.execute_sql("""
CREATE TABLE students_json (
    `id` STRING,
    `name` STRING,
    `age` INT,
    `sex` STRING,
    `clazz` STRING
) WITH (
    'connector' = 'kafka', -- 数据源类型
    'topic' = 'students',
    'properties.bootstrap.servers' = 'master:9092',
    'properties.group.id' = 'testGroup',
    'scan.startup.mode' = 'earliest-offset',
    'format' = 'json', -- 数据格式,flink会自动解析
    'json.ignore-parse-errors' = 'true'
)
""")

# 3. Create the JDBC (MySQL) sink table.
# Requires flink-connector-jdbc-3.2.0-1.18.jar and
# mysql-connector-java-8.0.29.jar on the Flink classpath, e.g.
# C:\ProgramData\miniconda3\envs\flink_env\Lib\site-packages\pyflink\lib
# The PRIMARY KEY (NOT ENFORCED) lets the sink accept the updating stream
# produced by the aggregation — per the Flink JDBC connector, rows are
# upserted by key rather than appended.
# NOTE(review): credentials are hard-coded in plain text — move them to
# configuration/secrets before this leaves a dev environment.
table_env.execute_sql("""
CREATE TABLE clazz_num (
    clazz STRING,
    num BIGINT,
    PRIMARY KEY (clazz) NOT ENFORCED
) WITH (
    'connector' = 'jdbc',
    'url' = 'jdbc:mysql://master:3306/bigdata',
    'table-name' = 'clazz_num', -- mysql中的表,需要先在mysql中创建表
    'username' = 'root',
    'password' = '123456'
)
""")

# 4. Submit the streaming aggregation: count students per class and write
# the result to the MySQL sink.
insert_job = table_env.execute_sql("""
insert into clazz_num
select clazz,count(1) as num
from 
students_json
group by clazz
""")
# Block the script until the (unbounded) streaming job terminates.
insert_job.wait()
