from pyspark.sql import SparkSession, functions as F

if __name__ == '__main__':
    # Build the SparkSession object.
    # BUG FIX: the original passed "local[*]" to appName(), which only named
    # the application "local[*]" and never set the master URL. The master
    # belongs in .master(); appName() gets a human-readable job name.
    spark = SparkSession.builder \
        .master("local[*]") \
        .appName("StructuredStreamingWordCount") \
        .config("spark.sql.shuffle.partitions", "4") \
        .getOrCreate()
    # master    -> where to run (local with all cores)
    # appName   -> sets the application name shown in the Spark UI
    # config    -> sets common properties; use this for any configuration
    # getOrCreate finally creates (or reuses) the SparkSession object

    # TODO 1. Load data: an unbounded streaming DataFrame reading lines
    # from a TCP socket (one row per line, single string column "value").
    df = spark.readStream \
        .format("socket") \
        .option("host", "192.168.111.129") \
        .option("port", 9999) \
        .load()

    df.printSchema()

    print(type(df))

    # TODO 2. Process data
    # Split each line into words (one word per output row).
    words = df.select(
        F.explode(F.split(df.value, " ")).alias("word")
    )

    # Generate the running word counts.
    wordCounts = words.groupBy("word").count()

    # TODO 3. Write results to the console, re-emitting the full result
    # table on every trigger ("complete" mode is required for streaming
    # aggregations without a watermark).
    query = wordCounts.writeStream \
        .outputMode("complete") \
        .format("console") \
        .start()

    # TODO 4. Block until the query terminates; TODO 5. Always release
    # resources, even if awaitTermination() is interrupted — the original
    # spark.stop() was unreachable because awaitTermination() never returns
    # normally for an endless socket stream.
    try:
        query.awaitTermination()
    finally:
        spark.stop()


