from pyspark.context import SparkContext
from pyspark.streaming.context import StreamingContext

# Spark Streaming word count over a socket text source.
sc = SparkContext()

# 1. Create the stream-processing context.
# Micro-batching: records are processed once per interval as a batch,
# not one record at a time.
ssc = StreamingContext(sc, batchDuration=5)

# 2. Read the data.
# On the server, start a socket source first: nc -lk 8888
# DStream is the stream-programming model; it is backed by RDDs.
lines_ds = ssc.socketTextStream("master", 8888)

# The operator pipeline below executes once per 5-second batch:
# split each comma-separated line into words, pair every word with 1,
# then sum the counts per key.
# NOTE: reduceByKey aggregates only within a single batch, not across batches.
counts_ds = (
    lines_ds
    .flatMap(lambda row: row.split(","))
    .map(lambda w: (w, 1))
    .reduceByKey(lambda a, b: a + b)
)

counts_ds.pprint()

# Launch the Spark Streaming job and block until it is terminated.
ssc.start()
ssc.awaitTermination()
ssc.stop()
