# -*- coding:utf-8 -*-
# @Author: shenyuyu
# @Time: 2023/6/28 18:01
# @File: h_1.py
from pyspark.sql import functions as F
from pyspark.sql.types import StructType, StringType
from pyspark.sql import SparkSession

if __name__ == '__main__':
    # Word-count demo over an HDFS text file, computed two equivalent ways:
    #   1) RDD API -> DataFrame -> Spark SQL aggregation
    #   2) DataFrame API directly (explode + groupBy)
    spark = SparkSession.builder.appName("a").master("local[*]").getOrCreate()
    sc = spark.sparkContext

    # --- Approach 1: RDD -> DataFrame -> SQL ---
    # Each word is wrapped in a single-element list so toDF() can infer
    # a one-column row schema.
    rdd = sc.textFile("hdfs://hadoop1:9820/a.txt")\
        .flatMap(lambda x: x.split(" "))\
        .map(lambda x: [x])
    # NOTE(review): collect() pulls every word to the driver — acceptable for a
    # small demo file, but avoid on large inputs.
    print(rdd.collect())
    df = rdd.toDF(["word"])
    df.createTempView("words")
    # Word frequencies, most common first.
    spark.sql("select word, count(*) as cnt from words group by word order by cnt desc").show()
    df.show()

    # --- Approach 2: DataFrame API on the text source ---
    # Each input row arrives in a single "value" column; split it into words
    # and explode so every word gets its own row, then count per word.
    df1 = spark.read.format("text").load("hdfs://hadoop1:9820/a.txt")
    df2 = df1.withColumn("value", F.explode(F.split(df1['value'], " ")))
    df2.groupby("value").count().withColumnRenamed("count", "cnt")\
        .orderBy("cnt", ascending=False).show()

    # Fix: release the SparkSession (and its underlying SparkContext /
    # cluster resources) instead of leaking it at interpreter exit.
    spark.stop()
