# 1) Obtain the Spark entry-point object: SparkSession
# The master can be yarn/mesos/local
# local : run inside local memory (single JVM, no cluster)
from pyspark.sql import SparkSession
from pyspark.sql.functions import when, col
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType

# Build (or reuse) the session that drives every DataFrame operation below.
spark = (
    SparkSession.builder
    .appName("HelloSpark")
    .master("local")
    .getOrCreate()
)

# Explicit schema for the Beijing PM2.5 CSV: six integer columns followed
# by the floating-point pm reading (NaN where the sensor had no value).
_int_columns = ["id", "year", "month", "day", "hour", "season"]
schema = StructType(
    [StructField(name, IntegerType()) for name in _int_columns]
    + [StructField("pm", DoubleType())]
)

# Load the CSV using the explicit schema; the file has a header row.
df = (
    spark.read
    .schema(schema)
    .option('header', True)
    .csv('dataset/beijingpm_with_nan.csv')
)

df.show()

# 1) drop — discard rows with missing values.
#    'pm' (double) carries NaN, 'year' (int) carries null; na.drop
#    treats both as missing. how='any' drops when any listed column is
#    missing, how='all' only when every listed column is missing.
for how_mode, columns in [
    ('any', ["pm"]),
    ('any', ['year']),
    ('any', ['year', 'pm']),
    ("all", ['year', 'pm']),
]:
    df.na.drop(how=how_mode, subset=columns).show()

# fill — substitute a default value for missing entries.
df.na.fill(50, subset=["pm"]).show(100000)

# replace — swap one concrete value for another.
df.na.replace(2010, 2025, subset=['year']).show()

# if/else style cleanup: re-read the raw CSV without a schema (every
# column arrives as a string), then derive a numeric 'pm' column — the
# literal string 'NA' marks a missing reading and is replaced by 50,
# anything else is cast to a double.
df = (
    spark.read
    .option('header', True)
    .csv('dataset/BeijingPM20100101_20151231.csv')
)

pm_raw = col('PM_Dongsi')
clean_df = df.withColumn(
    'pm',
    when(pm_raw == 'NA', 50).otherwise(pm_raw.cast(DoubleType())),
)

clean_df.printSchema()
clean_df.show(100000)
