from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.sql import SparkSession
from pyspark.sql.types import Row
from pyspark.streaming.context import StreamingContext

# Create the SparkSession (Spark SQL execution environment)
spark = SparkSession \
    .builder \
    .appName("rdd") \
    .getOrCreate()

# Get the underlying SparkContext for RDD operations
sc = spark.sparkContext

# 1. Create the stream-processing context
# Micro-batching: data is processed once per interval (5 seconds here),
# not record-by-record
ssc = StreamingContext(sc, batchDuration=5)

# 2. Read the data
# On the server, start a socket source first: nc -lk 8888
# DStream: the streaming programming model; under the hood it is a sequence
# of RDDs, one RDD produced every 5 seconds
# NOTE(review): host "master" and port 8888 are hard-coded — confirm they
# match the deployment environment
lines_ds = ssc.socketTextStream("master", 8888)


# foreach_fun: 每隔5秒执行一次
# 每次执行是RDD是不一样的
def foreach_fun(rdd: RDD) -> None:
    """Process one micro-batch: word-count the batch with Spark SQL.

    Invoked by Spark Streaming once per batch interval (every 5 seconds)
    with that interval's RDD of raw text lines. Splits each line on ','
    and prints a word/count table to stdout via DataFrame.show().

    :param rdd: the micro-batch RDD of input lines (a new RDD each call)
    """
    # Guard against empty batches: createDataFrame cannot infer a schema
    # from an empty RDD and raises ValueError, which would kill the
    # streaming job whenever a 5-second window receives no data.
    if rdd.isEmpty():
        return

    # Wrap each raw line in a Row so a DataFrame schema can be inferred
    row_rdd = rdd.map(lambda line: Row(line=line))

    # Convert to a DataFrame
    lines_df = spark.createDataFrame(row_rdd)

    # Register as a temp view so it is queryable from SQL
    lines_df.createOrReplaceTempView("lines")

    # Explode each line into words and count occurrences per word
    count_df = spark.sql("""
    select word,count(1) as num
    from 
    lines
    lateral view explode(split(line,',')) T as word
    group by word
    """)

    count_df.show()


# Apply foreach_fun to each RDD produced by the DStream (one every 5 seconds)
lines_ds.foreachRDD(foreach_fun)

# Start the Spark Streaming job
ssc.start()
# Block until the streaming computation is stopped or fails;
# ssc.stop() below only runs after awaitTermination() returns
ssc.awaitTermination()
ssc.stop()
