from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, StringType
from pyspark.sql.functions import split, col, when, udf, lit, to_date, explode, from_unixtime
from pyspark.conf import SparkConf
import os
import jieba.posseg as pseg


def jieba_tokenize(text):
    """
    Tokenize *text* with jieba part-of-speech tagging, keeping only
    common nouns (flag 'n') and person names (flag 'nr').

    Parameters
    ----------
    text : str or None
        The raw text to tokenize. May be None/empty when used as a
        Spark UDF, since the upstream column can be null.

    Returns
    -------
    str
        The kept words joined with commas; empty string for null/empty input.
    """
    # Guard: a Spark UDF receives None for null rows, and pseg.cut(None)
    # would raise — return an empty token string instead of crashing the job.
    if not text:
        return ''
    # POS-tag the text, then keep nouns ('n') and person names ('nr').
    words = pseg.cut(text)
    filtered_words = [word for word, flag in words if flag in ('n', 'nr')]
    return ','.join(filtered_words)


def main():
    """
    Pipeline entry point: build a SparkSession, read raw tab-separated
    session logs from HDFS, tokenize the selected text column with the
    jieba UDF, and append one row per word into a Hive table.
    """
    app_name = "pyspark-example"
    # Point Spark at the local installation and the Python interpreter
    # to use on both the driver and the executors.
    os.environ["SPARK_HOME"] = "/Users/edy/Downloads/spark-3.5.1-bin-hadoop3"
    os.environ["PYSPARK_DRIVER_PYTHON"] = "/opt/homebrew/anaconda3/bin/python"
    os.environ["PYSPARK_PYTHON"] = "/opt/homebrew/anaconda3/bin/python"

    # Run on YARN in client mode with Hive support enabled.
    spark = (
        SparkSession.builder
        .master("yarn")
        .config("spark.submit.deployMode", "client")
        .config("hive.metastore.uris", "thrift://localhost:9083")
        .enableHiveSupport()
        .appName(app_name)
        .getOrCreate()
    )

    # Read the raw lines, split on tabs, and derive the two columns we
    # need: the event date (field 4 is epoch millis) and the text to
    # tokenize (field 2, falling back to field 1 when it is "null").
    raw = spark.read.text(
        "hdfs:////user/hive/warehouse/hot_baidu_session/ds=20240524/*"
    )
    parsed = (
        raw
        .withColumn("split_col", split(col("value"), "\t"))
        .withColumn("createtime", to_date(from_unixtime(col("split_col")[4] / 1000)))
        .withColumn(
            "selected_value",
            when(col("split_col")[2] != "null", col("split_col")[2])
            .otherwise(col("split_col")[1]),
        )
        .drop("split_col")
    )

    # Wrap the jieba tokenizer as a string-returning UDF.
    jieba_udf = udf(jieba_tokenize, StringType())

    # Tokenize, split the comma-joined tokens back apart, and explode
    # them into one (createtime, word, count_word) row per token.
    tokens = (
        parsed
        .withColumn("end_value", jieba_udf(parsed["selected_value"]))
        .select("end_value", "createtime")
        .withColumn("end_values", split(col("end_value"), ","))
        .select("createtime", explode(col("end_values")).alias("word"))
        .withColumn("count_word", lit(1))
    )

    # Append the per-word rows into the Hive word-count table.
    tokens.write.mode("append").insertInto("default.hot_baidu_session_word_count")

    spark.stop()


# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
