# Create the Spark SQL environment
from pyspark.sql.functions import explode, split
from pyspark.sql.session import SparkSession

# Build (or reuse) a local SparkSession for the word-count job.
spark = (
    SparkSession.builder
    .master('local')
    .appName('word_count')
    .getOrCreate()
)

# Read the data.
# DataFrame: a DataFrame is backed by an RDD with a table schema added on top,
# which is what makes it queryable with SQL.
# NOTE(review): the separator is set to '|' while the words are comma-separated,
# presumably so each whole line lands in the single 'line' column — confirm
# against the input file.
lines_df = spark.read.format('csv') \
    .schema('line string') \
    .option('sep', '|') \
    .load('../../data/word.txt')

# Inspect the loaded rows.
lines_df.show()

# Register the DataFrame as a temp view so it can be queried with SQL.
lines_df.createOrReplaceTempView('lines')

# Process the data with plain SQL against the registered 'lines' view.
# lateral view + explode(split(...)) fans each comma-separated line out into
# one row per word; group by then tallies the occurrences.
word_count_sql = """
    select word,count(1) as num from
    lines
    lateral view explode(split(line,',')) t as word
    group by word
"""
count_df = spark.sql(word_count_sql)

count_df.show()

# DSL: the same aggregation expressed with DataFrame methods instead of SQL.
# Split each line on commas and explode into one row per word.
words_df = lines_df.select(
    explode(split('line', ',')).alias('word')
)
# Tally occurrences per word and print the result.
words_df.groupby('word').count().show()