from pyspark.sql import SparkSession
from pyspark.sql.functions import avg, col, max, min
from pyspark.sql.types import StructType, DoubleType, StringType, StructField, IntegerType, BooleanType, FloatType

# Create (or reuse) a local SparkSession for this script.
spark = (
    SparkSession.builder
    .master('local')
    .appName('HelloSpark')
    .getOrCreate()
)


# Explicit schema for the Beijing PM2.5 CSV: six integer columns followed
# by the PM reading as a double. Field order matches the CSV columns.
_INT_COLUMNS = ('id', 'year', 'month', 'day', 'hour', 'season')
schema = StructType(
    [StructField(name, IntegerType()) for name in _INT_COLUMNS]
    + [StructField('pm', DoubleType())]
)


# Load the CSV using the explicit schema; the first row is a header.
df = (
    spark.read
    .option("header", True)
    .schema(schema)
    .csv("dataset/beijingpm_with_nan.csv")
)

# df.show()

# Filling missing values (examples kept for reference):
# df.na.fill(0).show()
# df.na.fill(1, ['pm']).show()
# df.na.fill({'season': 1, 'pm': 50})

cleaned_df = df.na.drop(how='any', subset=['season', 'pm'])

# Average PM per year, displayed lowest-average first.
yearly_avg = cleaned_df.groupBy('year').agg(avg('pm').alias('pm_avg'))
yearly_avg.orderBy('pm_avg').show(truncate=False)

# Per-month PM statistics: keep only months whose average exceeds 100,
# then sort by that average, highest first.
# FIX: the filter now runs BEFORE the sort. The original chained
# .where(...) after .orderBy(...), which conceptually sorted rows that
# were then discarded; filtering first also mirrors the
# GROUP BY ... HAVING ... ORDER BY shape of the equivalent SQL query.
cleaned_df.groupby('year', 'month')\
    .agg(avg('pm').alias('pm_avg'), max('pm'), min('pm'))\
    .where('pm_avg > 100')\
    .orderBy(col('pm_avg').desc())\
    .show(truncate=False)

# The same monthly aggregation expressed in Spark SQL over a temp view.
cleaned_df.createOrReplaceTempView('pm_beijing')
monthly_sql = spark.sql("""
SELECT year, month, avg(pm) AS avg_pm, max(pm), min(pm)
FROM pm_beijing
GROUP BY year, month
HAVING avg_pm > 100
ORDER BY avg_pm DESC
""")
monthly_sql.show(truncate=False)


