# from pyspark.sql import SparkSession
# from pyspark.sql.functions import explode, window
# from pyspark.sql.functions import split
#
# spark = SparkSession \
#     .builder \
#     .appName("Data") \
#     .getOrCreate()
# spark.sparkContext.setLogLevel("WARN")
#
# df1 = spark.read.csv("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/up2.csv")
# df2 = spark.read.csv("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/UserProfile.csv")
#
# # df1.show()
# # df2.show()
#
# """
# select * from df1 union select * from df2
# """
# # df1.union(df2).write.csv("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/full.csv")
#
# # df3 = spark.read.csv("file:///Users/sonto/Workspace/Rimi/P1902/spark_example/full.csv")
# # df3.show(n=100)
#
# #
# new_df1 = df1.select("_c0", "_c1", "_c2")
# new_df2 = df1.select("_c0", "_c3", "_c4")
#
# # new_df1.show()
# # new_df2.show();
# #
# # new_df1.join(new_df2, on="_c0").show()
# # for x in new_df1.take(3):
# #     print(x)
# print(new_df1.head(3))
#
# # NOTE: SparkSession has no socketTextStream — that method lives on the legacy
# # DStream API's StreamingContext (ssc = StreamingContext(spark.sparkContext, batch_interval)).
# # lines = ssc.socketTextStream("localhost", 9999)
# # lines.flatMap(lambda line: line.split(" "))
# # dataStream = spark.readStream.format("socket").option("host", "127.0.0.1").option("port", 8088).load()
# #
# # df = spark.read.csv()
# #
# # words = dataStream.select(explode(split(dataStream.value, " ")).alias("word"))
# #
# # wordCounts = words.groupby(window()).count()
# #
# # query = wordCounts \
# #     .writeStream \
# #     .outputMode("complete") \
# #     .format("console") \
# #     .start()
# #
# # query.awaitTermination()
