package org.example

import org.apache.spark.sql.SparkSession

/**
 * Minimal Spark demo: builds two RDDs, transforms one (add 2 to each score,
 * keep scores >= 80, sort descending, take the top value) and prints both.
 */
object le {
  def main(args: Array[String]): Unit = {
    // Local Spark session using all available cores.
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Single partition so element order is preserved; with more partitions
    // the data would be scattered across them.
    val data1 = sc.parallelize(List(60, 90, 76, 89, 73), 1)

    // BUG FIX: the original `List(1 to 10)` created a one-element
    // List[Range], so the RDD held a single Range value instead of the
    // ten numbers 1..10 that the collect/println below intends to show.
    val data2 = sc.makeRDD((1 to 10).toList)

    // Add 2 to every score, keep those >= 80, sort descending, take the max.
    val res1 = data1
      .map(_ + 2)
      .filter(_ >= 80)
      .sortBy(identity, ascending = false)
      .take(1)

    res1.foreach(println)
    data2.collect().foreach(println)

    sc.stop()
  }
}
