import os

from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *

# Point Spark's Hadoop shims at a local install unless the environment
# already provides one (needed on Windows for winutils).
os.environ.setdefault('HADOOP_HOME', r'D:\hadoop-2.9.2')

# Driver
# Driver: a local, single-threaded Spark session for this demo app.
spark = (
    SparkSession.builder
    .master('local')
    .appName('HelloSpark')
    .getOrCreate()
)

# Explicit schema for the Beijing PM2.5 CSV: six integer columns plus the
# PM reading as a double (all nullable, the StructField default).
_FIELD_SPECS = [
    ("id", IntegerType()),
    ("year", IntegerType()),
    ("month", IntegerType()),
    ("day", IntegerType()),
    ("hour", IntegerType()),
    ("season", IntegerType()),
    ("pm", DoubleType()),
]
schema = StructType([StructField(name, dtype) for name, dtype in _FIELD_SPECS])

# Load the CSV against the explicit schema; 'header' makes Spark skip the
# header row instead of reading it as data.
csv_reader = spark.read.schema(schema).option('header', True)
df = csv_reader.csv('dataset/beijingpm_with_nan.csv')

# Rows missing either season or pm are useless for the aggregations below,
# so drop them up front (dropna is the documented alias of na.drop).
clean_df = df.dropna(how='any', subset=['season', 'pm'])

# Average PM per year, printed with the cleanest years first.
yearly_avg = clean_df.groupBy('year').agg(avg('pm').alias('pm_avg'))
yearly_avg.orderBy('pm_avg').show(truncate=False)

# Spark SQL.
# 1. The DataFrame API approach: readable and flexible.
# Fix: apply the filter on the aggregated average BEFORE sorting. The
# original sorted first and filtered afterwards, which logically asks the
# engine to globally sort rows that are then discarded; filtering first
# also mirrors the SQL version below, where HAVING runs before ORDER BY.
result_df = clean_df.groupby('year', 'month') \
    .agg(avg('pm').alias('pm_avg'), max('pm'), min('pm'), count('pm')) \
    .where('pm_avg > 80') \
    .orderBy('pm_avg')

# 2. The plain-SQL approach.
# 2.1 Register the cleaned DataFrame as a temp view so SQL can query it.
clean_df.createOrReplaceTempView("pm_beijing")

# Monthly PM stats for months averaging above 80, lowest average first.
monthly_stats_sql = """
SELECT year, month, avg(pm) AS avg_pm, max(pm), min(pm), count(pm) 
FROM pm_beijing 
GROUP BY year,month 
HAVING avg_pm > 80
ORDER BY avg_pm
"""
spark.sql(monthly_stats_sql).show(truncate=False)