from pyspark.sql.functions import *
from pyspark.sql.session import SparkSession

# Build a local SparkSession for this demo script.
# spark.sql.shuffle.partitions controls how many partitions Spark SQL
# produces after a shuffle (the default of 200 is overkill for local runs).
builder = SparkSession.builder
builder = builder.config("spark.sql.shuffle.partitions", 1)
builder = builder.master('local')
builder = builder.appName('sql')
spark = builder.getOrCreate()

# 1. CSV
# Read the students file as CSV with an explicit schema
# (no inference; the DDL string names and types every column).
students_df = spark.read.csv(
    '../../data/students.txt',
    sep=',',
    schema='id string, name string, age int, sex string, clazz string',
)

# After a shuffle, Spark SQL defaults to 200 partitions
# (this script overrides that to 1 in the session config).
clazz_num = students_df.groupBy('clazz').agg(count('clazz').alias('num'))

# Save the per-class counts as CSV, with \u0001 as the field separator.
clazz_num.write.format('csv').mode('overwrite').option('sep', '\u0001').save('../../data/clazz_num')

# 2. JSON
# Spark infers the schema automatically from the JSON data.
data_json = spark.read.json('../../data/data.json')

data_json.printSchema()

# Parse the nested structure: explode the 'roles' array into one row
# per element, then project the struct fields out of each element.
roles_rows = data_json.select(explode('roles').alias('roles'))
roles_rows.select('roles.id', 'roles.role', 'roles.works_count').show(truncate=False)


# 3. Parquet
# Parquet is a columnar format that carries its own schema and compression.
# Write the students DataFrame out as parquet.
students_df.write.parquet('../../data/students_parquet', mode='overwrite')

# Read the parquet data back and inspect it.
students_parquet = spark.read.parquet('../../data/students_parquet')
students_parquet.printSchema()
students_parquet.show(truncate=False)

# 4. ORC
# Same round-trip using the ORC columnar format.
students_df.write.orc('../../data/students_orc', mode='overwrite')
# NOTE(review): 'orc_parquet' is a misleading name for an ORC DataFrame —
# kept as-is in case later code references it; consider renaming to 'students_orc'.
orc_parquet = spark.read.orc('../../data/students_orc')
orc_parquet.printSchema()
orc_parquet.show(truncate=False)