from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.types import StructType

# Create (or reuse) a local SparkSession — the entry point for Structured Streaming.
spark = SparkSession \
    .builder \
    .appName("StructuredNetworkWordCount") \
    .getOrCreate()


# Stream CSV files that are written atomically into the watched directory.
# Streaming file sources require an explicit schema (inference is disabled).
userSchema = StructType().add("DateTime", "string").add("Value", "integer")
csvDF = spark \
    .readStream \
    .option("sep", ",") \
    .schema(userSchema) \
    .csv(r"D:\Data\Spark")  # raw string: "\D"/"\S" are invalid escapes in a plain literal
# (Equivalent to format("csv").load("/path/to/directory"))

# Split each Value into whitespace-separated words, one word per output row.
# NOTE(review): "Value" is declared as integer in the schema above, but split()
# operates on string columns — Spark will implicitly cast; confirm the schema
# actually matches the incoming data.
words = csvDF.select(
    explode(
        split(csvDF.Value, " ")
    ).alias("word")
)

# Running word count, re-aggregated as new files arrive.
wordCounts = words.groupBy("word").count()

# Emit the full counts table to the console on every trigger
# ("complete" output mode rewrites the whole result each time).
query = wordCounts \
    .writeStream \
    .outputMode("complete") \
    .format("console") \
    .start()

# Blocks until the query is stopped (query.stop(), exception, or Ctrl-C);
# everything below only runs after the stream terminates.
query.awaitTermination()

print("Finish")
# Basic Operations - Selection
# csvDF.select("DateTime").where("Value > 10")