from pyspark.sql.session import SparkSession

# Import only the function this script actually uses. The former wildcard
# import of pyspark.sql.functions hid where `count` came from, and the stray
# `from setuptools.command.alias import alias` was an IDE auto-import
# accident pulling in an unrelated setuptools command class.
from pyspark.sql.functions import count

# 1. Build the Spark SQL execution environment (the DataFrame/SQL entry point).
# Local mode; a single shuffle partition keeps this small demo fast.
spark = (
    SparkSession.builder
    .master("local")
    .config("spark.sql.shuffle.partitions", 1)
    .getOrCreate()
)

# 2. Read the data into a DataFrame (a DataFrame is backed by an RDD).
# schema: explicit column names and types for the CSV
# sep:    field delimiter
students_df = (
    spark.read
    .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
    .option("sep", ",")
    .csv("../../data/students.txt")
)

students_df.show()

# 1. DSL API (SQL-like DataFrame operations): count students per class.
by_clazz = students_df.groupBy("clazz")
by_clazz.agg(count("clazz").alias("num")).show()

# 2. SQL API: register the DataFrame as a temporary view so it can be
# queried with plain SQL.
students_df.createOrReplaceTempView("students")

# spark.sql returns a DataFrame; Spark SQL parses the statement and
# compiles it down to RDD operators for execution.
clazz_num_df = spark.sql("""
select clazz,count(1) as num
from students
group by clazz
""")

clazz_num_df.show()

# Keep the driver process alive (e.g. so the Spark web UI at
# http://localhost:4040 stays reachable). The original `while True: pass`
# busy-spins and pins one CPU core at 100%; sleeping between iterations
# keeps the process alive at essentially zero cost.
import time

while True:
    time.sleep(60)
