from pyspark.sql import SparkSession
from pyspark.sql.functions import col, expr, concat, countDistinct, to_timestamp, year, count, month, max, weekofyear, mean, min
from pyspark.sql.types import StructType, ArrayType, StringType, StructField, IntegerType, BooleanType, FloatType

# Driver: single-node local Spark session for this example job.
spark = (SparkSession.builder
         .master('local')
         .appName('HelloSpark')
         .getOrCreate())

# Explicit schema for the SF fire-calls CSV (avoids type inference).
# Every column is nullable, so the table below only needs (name, type) pairs.
_fire_columns = [
    ("CallNumber", IntegerType()),
    ("UnitID", StringType()),
    ("IncidentNumber", IntegerType()),
    ("CallType", StringType()),
    ("CallDate", StringType()),
    ("WatchDate", StringType()),
    ("CallFinalDisposition", StringType()),
    ("AvailableDtTm", StringType()),
    ("Address", StringType()),
    ("City", StringType()),
    ("Zipcode", IntegerType()),
    ("Battalion", StringType()),
    ("StationArea", StringType()),
    ("Box", StringType()),
    ("OriginalPriority", StringType()),
    ("Priority", StringType()),
    ("FinalPriority", IntegerType()),
    ("ALSUnit", BooleanType()),
    ("CallTypeGroup", StringType()),
    ("NumAlarms", IntegerType()),
    ("UnitType", StringType()),
    ("UnitSequenceInCallDispatch", IntegerType()),
    ("FirePreventionDistrict", StringType()),
    ("SupervisorDistrict", StringType()),
    ("Neighborhood", StringType()),
    ("Location", StringType()),
    ("RowID", StringType()),
    ("Delay", FloatType()),
]
fire_schema = StructType(
    [StructField(name, dtype, True) for name, dtype in _fire_columns]
)

# Load the raw CSV with the explicit schema; first line is a header row.
df = spark.read.csv('dataset/sf-fire-calls.txt', header=True, schema=fire_schema)
# df.printSchema()

# Parse the three string date/time columns into proper timestamp columns,
# dropping each raw string column once it has been converted.
# AvailableDtTm carries a 12-hour clock time, hence the "hh:mm:ss a" pattern.
_ts_conversions = [
    ('CallDate', 'IncidentDate', 'MM/dd/yyyy'),
    ('WatchDate', 'OnWatchDate', 'MM/dd/yyyy'),
    ('AvailableDtTm', 'AvailableDtTS', 'MM/dd/yyyy hh:mm:ss a'),
]
cleaned_df = df
for _raw, _parsed, _fmt in _ts_conversions:
    cleaned_df = cleaned_df.withColumn(_parsed, to_timestamp(col(_raw), _fmt)).drop(_raw)

# 01. Print all distinct CallType values that occurred in 2018
# cleaned_df \
#     .select('CallType') \
#     .where(year('IncidentDate') == 2018) \
#     .distinct().show(truncate = False)
#
# # 02. Which month of 2018 had the highest number of fire calls?
# m = cleaned_df \
#     .withColumn('month', month('IncidentDate'))\
#     .where(year('IncidentDate') == 2018) \
#     .groupby('month') \
#     .count() \
#    # .orderBy(col('count').desc())
# # m.show()
# m.select(col('month'))\
#     .where(col('count') == m.select(max(col('count'))).collect()[0][0])\
#     .show()
#
# # 03. Which San Francisco neighborhood had the most fire calls in 2018?
# Neigh = cleaned_df \
#     .where(year('IncidentDate') == 2018) \
#     .where(col('City') == 'San Francisco') \
#     .groupby('Neighborhood') \
#     .count() \
#     .orderBy(col('count').desc())
# Neigh.show()
# Neigh.select(col('Neighborhood'))\
#     .where(col('count') == Neigh.select(max(col('count'))).collect()[0][0])\
#     .show()

# 04. Which San Francisco neighborhood had the slowest response time in 2018?
# Nei = cleaned_df \
#     .where(year('IncidentDate') == 2018) \
#     .where(col('City') == 'San Francisco') \
#     .groupby('Neighborhood') \
#     .mean() \
#     #.orderBy(col('avg(Delay)').desc())
# Nei.show()
# Nei.select(col('Neighborhood'))\
#     .where(col('avg(Delay)') == Nei.select(max(col('avg(Delay)'))).collect()[0][0])\
#     .show()

# 05. Which week of 2018 had the most fire calls?
# Week = cleaned_df \
#     .withColumn('week', weekofyear('IncidentDate'))\
#     .where(year('IncidentDate') == 2018)\
#     .groupby('week') \
#     .count() \
# #     .orderBy(col('count').desc())
# # Week.show()
# Week.select(col('week'))\
#     .where(col('count') == Week.select(max(col('count'))).collect()[0][0])\
#     .show()

# 06. Is there a correlation between "neighborhood", "zip code" and "number of fire calls"?


# 07. Store the data as parquet (and read it back)
# Persist the cleaned dataset as parquet.
# mode("overwrite") makes the script re-runnable: without it Spark raises
# AnalysisException when 'dataset/fire' already exists from a previous run.
cleaned_df.write.format("parquet").mode("overwrite").save("dataset/fire")