package org.zhi

import org.apache.spark.sql.SparkSession

/**
 * @className: SparkDemo
 * @author: Administrator
 * @date: 2025-02-13 14:19
 */
object SparkDemo {
  // Classic word count: read a text file, tokenize it, and print per-word frequencies.
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .master("local[*]").getOrCreate()
    val sc = spark.sparkContext
    // Build an RDD from the input file and compute word frequencies.
    val wordCounts = sc.textFile("src/main/resources/input.txt")
      // Strip sentence-ending periods so "word." and "word" count as the same token.
      .map(_.replace(".", " "))
      // Split on runs of whitespace. The original split on a single space,
      // which produced empty tokens wherever the replace above created
      // consecutive spaces — those empty strings were then counted as words.
      .flatMap(_.split("\\s+"))
      // Drop any residual empty tokens (e.g. from a line starting with whitespace).
      .filter(_.nonEmpty)
      // Pair each word with 1, then sum counts per word.
      .map(word => (word, 1))
      .reduceByKey(_ + _)
    wordCounts.collect().foreach(println)
    // Stop the whole SparkSession (this also stops the underlying SparkContext).
    // The original stopped only the SparkContext, leaving session state behind.
    spark.stop()
  }
}

// Filter all even numbers out of a list and sum them
object ListOperations {
  // Demo: select the even elements of a fixed list, total them, and print both.
  def main(args: Array[String]): Unit = {
    val xs = List(1, 2, 3, 4, 5, 6)
    // Keep only values divisible by two.
    val evens = xs.collect { case n if n % 2 == 0 => n }
    // Accumulate the total with an explicit left fold.
    val total = evens.foldLeft(0)(_ + _)
    println(s"Even numbers: $evens, Sum: $total")
  }
}