from pyflink.common import SimpleStringSchema, WatermarkStrategy
from pyflink.datastream import StreamExecutionEnvironment, MapFunction, RuntimeContext
from pyflink.datastream.connectors.kafka import KafkaSource, KafkaOffsetsInitializer
from pyparsing import lineEnd
import pymysql

# 1. Create the streaming execution environment.
env = StreamExecutionEnvironment.get_execution_environment()

# Run the whole job with a single parallel task.
env.set_parallelism(1)

# 2. Build a Kafka source: consume the "words" topic from the earliest
#    offset and deserialize every record value as a plain string.
source = (
    KafkaSource.builder()
    .set_bootstrap_servers("master:9092")
    .set_topics("words")
    .set_group_id("my-group")
    .set_starting_offsets(KafkaOffsetsInitializer.earliest())
    .set_value_only_deserializer(SimpleStringSchema())
    .build()
)

# Wrap the source as a DataStream; no event-time watermarks are needed here.
lines_ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "Kafka Source")


def flat_map_fun(line):
    """Split a comma-separated line into ``(word, 1)`` pairs."""
    return ((word, 1) for word in line.split(","))


# 1. Fan each incoming line out into individual (word, 1) pairs.
words_ds = lines_ds.flat_map(flat_map_fun)

# 2. Key the stream by the word itself and keep a running sum of the
#    count field (index 1 of each pair).
word_count_ds = words_ds.key_by(lambda pair: pair[0]).sum(1)


class ToMySQLMapFun(MapFunction):
    """Writes each (word, count) pair into the MySQL table ``word_count``.

    One database connection is opened per parallel task instance in
    ``open`` and reused for the task's lifetime.
    """

    def open(self, runtime_context: RuntimeContext):
        # Called once per task at startup: open the database connection.
        # (Requires: pip install pymysql)
        self.con = pymysql.connect(
            host="master", port=3306, user="root",
            password="123456", database="shujia",
        )

    def close(self):
        # Called once per task at shutdown. Guard against open() having
        # failed before the connection attribute was set.
        con = getattr(self, "con", None)
        if con is not None:
            con.close()

    def map(self, kv):
        word, count = kv

        # pymysql cursors are context managers, so the cursor is always
        # closed even if execute() raises (the original code leaked one
        # cursor per record).
        with self.con.cursor() as cursor:
            # REPLACE INTO acts as an upsert — assumes ``word`` is the
            # primary/unique key of word_count (TODO: confirm schema).
            cursor.execute(
                "replace into word_count(word,count) values (%s,%s)",
                (word, count),
            )
        self.con.commit()

        # Return the record so further operators can be chained after this
        # map; the original returned None, which would break downstream use.
        return kv


# Sink: push every updated (word, count) record into MySQL.
mysql_sink = ToMySQLMapFun()
word_count_ds.map(mysql_sink)

# Submit the job for execution.
env.execute()
