import org.apache.spark.sql.SparkSession

object WordProcessing {

  /** Entry point: builds a local Spark session, tokenizes a small set of
    * phrases into lower-case words, and prints them to stdout.
    */
  def main(args: Array[String]): Unit = {
    // Local Spark session using all available cores.
    val session = SparkSession.builder()
      .appName("Word Processing")
      .master("local[*]")
      .getOrCreate()

    // Sample data set: ten short phrases.
    val phrases = Seq(
      "Hello World", "Scala Spark", "Big Data", "Machine Learning",
      "Data Science", "Apache Hadoop", "Cloud Computing", "Artificial Intelligence",
      "Deep Learning", "Natural Language Processing"
    )

    val phrasesRDD = session.sparkContext.parallelize(phrases)

    // Split each phrase on single spaces, then normalize every word to lower case.
    val words = phrasesRDD
      .flatMap(phrase => phrase.split(" "))
      .map(word => word.toLowerCase)

    // Materialize the result on the driver and print one word per line.
    words.collect().foreach(println)

    session.stop()
  }
}
