from pyspark.sql import SparkSession, functions as F
from pyspark.sql.types import StructType, StringType, IntegerType

if __name__ == '__main__':
    # Build the SparkSession entry point.
    # BUG FIX: the original chained appName("local[*]"), which set the
    # *application name* to "local[*]" and never configured a master;
    # the cluster URL belongs in master().
    spark = SparkSession.builder \
        .master("local[*]") \
        .appName("StructuredStreamingCSVDemo") \
        .config("spark.sql.shuffle.partitions", "4") \
        .getOrCreate()
    # master      -> where to run (all local cores)
    # appName     -> human-readable application name
    # config      -> common runtime properties (here: shuffle partition count)
    # getOrCreate -> returns an existing session or creates a new one

    # Grab the SparkContext from the session (kept for parity with the
    # original script; not otherwise used below).
    sc = spark.sparkContext

    # TODO 1. Load the data.

    # Explicit schema: streaming reads do not infer schemas by default, so
    # even a structured source such as CSV must have one supplied up front.
    schema = StructType().add("name", StringType(), nullable=True) \
        .add("age", IntegerType(), nullable=True) \
        .add("job", StringType(), nullable=True)

    df = spark.readStream \
        .format(source="csv") \
        .schema(schema) \
        .option("header", "true") \
        .option("sep", ";") \
        .load("../data/streamdata")

    df.printSchema()

    print(type(df))

    # TODO 2. Transform the data.

    # TODO 3. Write the results to the console sink.
    # NOTE: outputMode("complete") would fail here -- "Complete output mode
    # not supported when there are no streaming aggregations on streaming
    # DataFrames/Datasets"; complete mode requires an aggregation.
    df.writeStream \
        .format("console") \
        .outputMode("append") \
        .option("truncate", False) \
        .start() \
        .awaitTermination()  # TODO 4. Start the query and block until it ends.

    # TODO 5. Release resources (only reached once the query terminates).
    spark.stop()
