import json

from pyflink.common import SimpleStringSchema, WatermarkStrategy, Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors.kafka import KafkaSource, KafkaOffsetsInitializer
from pyflink.table import StreamTableEnvironment
from pyflink.table.expressions import col

# Create the Flink stream-processing execution environment.
env = StreamExecutionEnvironment.get_execution_environment()

# Parallelism 1 keeps the demo output readable (single ordered stream).
env.set_parallelism(1)

# Create a SQL/Table execution environment on top of the stream environment.
t_env = StreamTableEnvironment.create(env)

# 1. Build the Kafka source: read string records from the "students" topic,
#    starting at the latest offsets (only new messages are consumed).
source = KafkaSource.builder() \
    .set_bootstrap_servers("master:9092") \
    .set_topics("students") \
    .set_group_id("my-group") \
    .set_starting_offsets(KafkaOffsetsInitializer.latest()) \
    .set_value_only_deserializer(SimpleStringSchema()) \
    .build()

# Build an unbounded DataStream from the Kafka source (no event-time
# watermarks are needed here — the query is a simple keyed aggregation).
lines_ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "Kafka Source")


# Parse raw records
def map_fun(line):
    """Parse one JSON-encoded student record into a 5-field tuple.

    Args:
        line: JSON string expected to contain the keys
            "id", "name", "age", "sex", "clazz".

    Returns:
        Tuple of (id, name, age, sex, clazz) in that order, matching the
        TUPLE type declared on the downstream ``map`` call.

    Raises:
        json.JSONDecodeError: if ``line`` is not valid JSON.
        KeyError: if any expected key is missing.
    """
    record = json.loads(line)
    # Unpack fields directly instead of binding each to a local —
    # the original bound one to the builtin name `id`, shadowing it.
    return (record["id"], record["name"], record["age"],
            record["sex"], record["clazz"])


# Parse each JSON line into a typed 5-tuple so the Table API can infer columns.
students_ds = lines_ds.map(map_fun,
                           output_type=Types.TUPLE(
                               [Types.STRING(), Types.STRING(), Types.INT(), Types.STRING(), Types.STRING()]))

# 2. Convert the stream into a dynamic table (analogous to a Spark DataFrame),
#    naming the columns explicitly; usable for DSL-style queries.
students = t_env.from_data_stream(students_ds, col("id"), col("name"), col("age"), col("sex"), col("clazz"))

# Print the inferred table schema for debugging.
students.print_schema()

# 3. Run a continuous query on the table; it returns a new dynamic table
#    that updates as records arrive.
# Option 1: Table API (DSL) version of the same aggregation.
# clazz_num = students \
#     .group_by(col("clazz")) \
#     .select(col("clazz"), col("clazz").count)


# Option 2: SQL version (used below).
# Register a temporary view so the table can be referenced by name in SQL.
t_env.create_temporary_view("students", students)
# Execute the query; returns a new dynamic table of per-class counts.
clazz_num = t_env.sql_query("""
select clazz,count(1) as num
from 
students
group by clazz
""")


# 4. Convert the result table back into a stream.
# to_retract_stream: emits (flag, row) pairs — a False flag retracts a
# previously emitted count when a group's aggregate is updated.
clazz_num_ds = t_env.to_retract_stream(clazz_num, Types.ROW([Types.STRING(), Types.LONG()]))

# Print the retract stream to stdout.
clazz_num_ds.print()

# Launch the job (blocks; the pipeline above is only a plan until now).
env.execute()