from pyspark.sql.functions import *
from pyspark.sql.session import SparkSession
from pyspark.sql.types import Row
from pyspark.sql.functions import *

# Build the SparkSession, the SQL execution entry point.
# enableHiveSupport: enables the Hive metastore so Hive tables are visible.
spark = (
    SparkSession.builder
    .appName("rdd")
    .config("spark.sql.shuffle.partitions", 1)
    .enableHiveSupport()
    .getOrCreate()
)

# Recreate the result table from scratch: drop any stale copy first.
spark.sql("drop table if exists clazz_num")

# Run the per-class count directly as SQL against the Hive table (CTAS).
create_clazz_num_sql = """
    create table clazz_num as 
    select clazz,count(1) as num
    from 
    students
    group by clazz
"""
spark.sql(create_clazz_num_sql)

# Load the Hive table as a DataFrame.
students_df = spark.table("students")

# Aggregate: number of rows (students) per class.
clazz_num = (
    students_df
    .groupBy("clazz")
    .count()
    .withColumnRenamed("count", "num")
)

# Persist the DataFrame into a Hive table.
# 1. saveAsTable: creates the table automatically when it does not exist
#    (equivalent to CREATE TABLE clazz_num1 AS ...); default format is parquet.
clazz_num.write.mode("overwrite").saveAsTable("clazz_num1")

# 2. insertInto: the target table must already exist, so create it manually.
# FIX: removed the trailing ';' from the DDL — Spark's SQL parser rejects a
# semicolon inside spark.sql() with "ParseException: extraneous input ';'".
spark.sql("""
CREATE TABLE if not exists clazz_num2(
    clazz string ,
    num BIGINT
) 
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' 
STORED AS textfile
location '/data/clazz_num2'
""")
# insertInto matches columns by POSITION, not by name — clazz_num's column
# order (clazz, num) must line up with the table definition above.
clazz_num.write.mode("overwrite").insertInto("clazz_num2")


