from pyspark.sql import SparkSession, functions as F
from pyspark.sql.types import ArrayType, StringType

if __name__ == '__main__':
    # Local single-machine session; shuffle partitions kept small (2)
    # so the example's show() calls finish quickly.
    spark = (
        SparkSession.builder
        .appName("test")
        .master("local[*]")
        .config("spark.sql.shuffle.partitions", 2)
        .getOrCreate()
    )

    sc = spark.sparkContext

    # Two rows, each a single space-separated string in column "line".
    df = sc.parallelize(
        [["hadoop spark flink"], ["hadoop flink java"]]
    ).toDF(["line"])


    def split_line(text: str):
        """Split a space-separated string into a list of tokens."""
        return text.split(" ")


    # TODO 1: first way to build a UDF.
    # register() both registers the name "udf1" for SQL use and
    # returns a callable usable in DSL-style column expressions.
    registered_udf = spark.udf.register("udf1", split_line, ArrayType(StringType()))

    # DSL style
    df.select(registered_udf(df['line'])).show(truncate=False)
    # SQL style
    df.createOrReplaceTempView("lines")
    spark.sql("select udf1(line) from lines").show(truncate=False)

    # TODO 2: second way to build a UDF.
    # F.udf() produces a UDF usable only in DSL style (no SQL name).
    dsl_udf = F.udf(split_line, ArrayType(StringType()))
    df.select(dsl_udf(df['line'])).show(truncate=False)