from pyspark.sql import SparkSession
from pyspark.sql.functions import col, year, month, weekofyear, avg, desc, count, to_timestamp, when
from pyspark.sql.types import StructField, BooleanType, IntegerType, StringType, FloatType, StructType

# Build (or reuse) a local SparkSession for this analysis.
spark = (
    SparkSession.builder
    .master('local')
    .appName('FireAnalysis')
    .getOrCreate()
)

# Explicit schema for the SF fire-calls CSV, declared as (column, Spark type)
# pairs; every column is nullable (third StructField argument is True).
_columns = [
    ('CallNumber', IntegerType()),
    ('UnitID', StringType()),
    ('IncidentNumber', IntegerType()),
    ('CallType', StringType()),
    ('CallDate', StringType()),
    ('WatchDate', StringType()),
    ('CallFinalDisposition', StringType()),
    ('AvailableDtTm', StringType()),
    ('Address', StringType()),
    ('City', StringType()),
    ('Zipcode', IntegerType()),
    ('Battalion', StringType()),
    ('StationArea', StringType()),
    ('Box', StringType()),
    ('OriginalPriority', StringType()),
    ('Priority', StringType()),
    ('FinalPriority', IntegerType()),
    ('ALSUnit', BooleanType()),
    ('CallTypeGroup', StringType()),
    ('NumAlarms', IntegerType()),
    ('UnitType', StringType()),
    ('UnitSequenceInCallDispatch', IntegerType()),
    ('FirePreventionDistrict', StringType()),
    ('SupervisorDistrict', StringType()),
    ('Neighborhood', StringType()),  # assumed present in the dataset -- confirm
    ('Location', StringType()),
    ('RowID', StringType()),
    ('Delay', FloatType()),
]
fire_schema = StructType([StructField(name, dtype, True) for name, dtype in _columns])

# Load the fire-calls CSV using the explicit schema; the header row is skipped.
df = spark.read.csv('dataset/sf-fire-calls.txt', header=True, schema=fire_schema)

# --------------------------------------------------
# 1. Print every distinct CallType observed in 2018
# --------------------------------------------------
print("2018 年份所有的 CallType（去重）:")
# Filtering nulls before projecting is equivalent to the original
# select-then-where order; only non-null CallType values survive.
call_types_2018 = (
    df.where(year(to_timestamp('CallDate', 'MM/dd/yyyy')) == 2018)
      .where(col('CallType').isNotNull())
      .select('CallType')
      .distinct()
)
call_types_2018.show(truncate=False)

# --------------------------------------------------
# 2. Which month of 2018 had the most fire calls?
# --------------------------------------------------
print("2018 年每个月份的火警次数:")
# Parse CallDate once and reuse the expression for both the filter and the
# grouping key instead of re-deriving to_timestamp three times.
call_ts = to_timestamp(col('CallDate'), 'MM/dd/yyyy')

# fire_2018 is reused by questions 3 and 5 below, so its name and schema
# are kept unchanged.
fire_2018 = df.filter(col('CallDate').isNotNull() & (year(call_ts) == 2018))

# The original wrapped the grouping key in when(month(...)).otherwise(13) to
# bucket null months as 13, but that branch is unreachable: any row whose
# CallDate is null or unparseable yields a null year, fails the == 2018
# filter above, and never reaches the groupBy. Group directly on the month.
fire_2018.groupBy(month(call_ts).alias('Month')) \
    .count() \
    .orderBy(desc('count')) \
    .show(1, truncate=False)

# --------------------------------------------------
# 3. Which SF neighborhood had the most fire calls in 2018?
# --------------------------------------------------
print("San Francisco 的 neighborhood 在 2018 年火灾次数最多的:")
# Count 2018 calls per neighborhood and show only the largest group.
neighborhood_counts = fire_2018.groupBy('Neighborhood').count()
neighborhood_counts.orderBy(col('count').desc()).show(1, truncate=False)

# --------------------------------------------------
# 4. Which SF neighborhood had the slowest average response in 2018?
# --------------------------------------------------
print("San Francisco 的 neighborhood 在 2018 年响应最慢的:")
# The original re-filtered df to 2018 (duplicating the fire_2018 predicate)
# and renamed Delay -> ResponseDelayedinMins solely to average it. Reuse the
# already-filtered fire_2018 and alias the aggregate instead; the output
# column names and the displayed row are unchanged.
fire_2018.groupBy('Neighborhood') \
         .agg(avg('Delay').alias('AvgResponseTime')) \
         .orderBy(desc('AvgResponseTime')) \
         .show(1, truncate=False)

# --------------------------------------------------
# 5. Which week of 2018 had the most fire calls?
# --------------------------------------------------
print("2018 年哪一周的火警次数最多:")
# Derive the ISO week number from the parsed call date, then count per week.
week_col = weekofyear(to_timestamp(col('CallDate'), 'MM/dd/yyyy')).alias('Week')
weekly_counts = fire_2018.groupBy(week_col).count()
weekly_counts.orderBy(col('count').desc()).show(1, truncate=False)

# --------------------------------------------------
# 6. Are any values in the dataset correlated?
# --------------------------------------------------
print("数据集中任意值之间的关联性:")
# describe() only reports summary statistics (count/mean/stddev/min/max) and
# by itself cannot answer the correlation question the print announces; keep
# it for context but also compute an actual Pearson correlation between the
# two numeric measures most plausibly related: alarm count vs. response delay.
df.describe().show(truncate=False)
corr_alarms_delay = df.stat.corr('NumAlarms', 'Delay')
print(f"Pearson corr(NumAlarms, Delay) = {corr_alarms_delay}")

# --------------------------------------------------
# 7. Store as Parquet and read it back
# --------------------------------------------------
# mode('overwrite') makes the script rerunnable: the default save mode is
# 'errorifexists', so a second run would raise AnalysisException because
# the 'fire_data.parquet' directory already exists.
df.write.mode('overwrite').parquet('fire_data.parquet')

# Read the Parquet files back and display a sample to confirm the round trip.
parquet_df = spark.read.parquet('fire_data.parquet')
parquet_df.show()