from pyspark.sql import SparkSession
from pyspark.sql.functions import explode, split, window
from pyspark.sql.types import StructType

# Build (or reuse) the local SparkSession driving this streaming job.
spark = (
    SparkSession.builder
    .appName("StreamFile")
    .getOrCreate()
)

# Runtime tuning: every shuffle partition maps to one task, so keep the
# partition count small for a local run.
spark.conf.set("spark.sql.shuffle.partitions", 5)
# spark.conf.set("spark.executor.memory", "2g")
# spark.conf.set("spark.default.parallelism", 10)

# Stream all the CSV files written atomically into a directory.
# Notes (translated from the original comments):
#   - the timestamp format must be specified explicitly
#   - only a directory path may be given, not an individual file
userSchema = StructType().add("Timestamp", "timestamp").add("Value", "integer")
csvDF = spark \
    .readStream \
    .schema(userSchema) \
    .format("csv") \
    .option("delimiter", ",") \
    .option("timestampFormat", "yyyy-MM-dd HH:mm:ss") \
    .load(r"D:\Data\Spark")  # raw string: "\D" / "\S" are invalid escapes (SyntaxWarning)

# Running count of occurrences of each distinct Value.
# Use the exact schema column name "Value": the analyzer resolves "value"
# only because spark.sql.caseSensitive defaults to false, and that would
# break if case sensitivity were enabled.
wordCounts = csvDF.groupBy("Value").count()

# Alternative: count per event-time window instead of per value.
# windowedCounts = csvDF.groupBy(
#     window(csvDF.Timestamp, "1 minutes", "10 seconds"),
#     # csvDF.Value
# ).count()

# Continuously print the running counts to the console, re-emitting the
# complete result table on a fixed 2-second trigger.
query = (
    wordCounts.writeStream
    .outputMode("complete")
    .format("console")
    .trigger(processingTime='2 seconds')
    .start()
)

# query.awaitTermination()  # alternative: wait on this single query only
# Block here until any active streaming query on this session terminates.
manager = spark.streams
manager.awaitAnyTermination()

print("Finish")
# Basic Operations - Selection
# csvDF.select("DateTime").where("Value > 10")