# Fix: `from pyspark.sql.functions import *` was imported twice (duplicate
# wildcard import removed). NOTE(review): the wildcard import is kept for
# compatibility with the rest of the script, though explicit imports of
# col/explode/count/desc would be clearer.
from pyspark.sql.functions import *
from pyspark.sql.session import SparkSession
from pyspark.sql.types import Row

import jieba

# Build the Spark SQL execution environment.
# enableHiveSupport: enables Hive metastore support, so tables defined in
# Hive are visible to this session.
builder = SparkSession.builder
builder = builder.appName("rdd")
builder = builder.config("spark.sql.shuffle.partitions", 1)
builder = builder.enableHiveSupport()
spark = builder.getOrCreate()

# Load the raw comment file as a DataFrame with an explicit schema
# (columns `a` and `b` are unused trailing fields of the CSV).
comment_schema = "id STRING,comment_time STRING,comment STRING,a STRING ,b STRING"
comments_df = (
    spark.read.format("csv")
    .schema(comment_schema)
    .load("../../data/comment.txt")
)

# Keep only non-null comment texts and drop down to an RDD of Rows
# (needed so jieba, a plain Python library, can be applied per record).
comments_rdd = (
    comments_df
    .filter(col("comment").isNotNull())
    .select("comment")
    .rdd
)

# Tokenize each comment with the jieba segmenter; each output Row carries
# the full token list for one comment.
def _tokenize(row):
    return Row(words=jieba.lcut(row.comment))

words_rdd = comments_rdd.map(_tokenize)

# Promote the tokenized RDD back to a DataFrame; the schema is inferred
# from the Row objects (a single array<string> column named `words`).
words_df = words_rdd.toDF()

# Print the inferred table structure.
words_df.printSchema()

# Word-frequency report: one row per token, counted, sorted descending.
exploded_df = words_df.select(explode("words").alias("word"))
counted_df = exploded_df.groupBy("word").agg(count("*").alias("num"))
counted_df.orderBy(desc("num")).show()
