package org

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object zkk {
  /**
   * Entry point: demonstrates basic RDD transformations, then reads a CSV of
   * employee salaries and prints the top-3 earners for the first half-year.
   */
  def main(args: Array[String]): Unit = {
    // Spark execution environment (local mode, all available cores).
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // 1. Create RDDs. A single partition keeps data1's order deterministic.
    val data1 = sc.parallelize(List(68, 98, 75, 80, 72), 1)
    val data2 = sc.makeRDD(List("how are you", " I am fine", "think you"))

    // 2. Transform: add 2, keep values >= 80, sort descending, take the top value.
    val res1 = data1.map(_ + 2).filter(_ >= 80).sortBy(identity, ascending = false).take(1)
    // split("") breaks each string into single characters.
    // NOTE(review): if word-splitting was intended, this should be split(" ") — confirm.
    val res2 = data2.flatMap(_.split(""))

    // 3. Print the computed results.
    // (Original printed the raw input RDDs and discarded res1/res2.)
    res1.foreach(println)
    res2.collect.foreach(println)

    // Top-3 employees by actual salary for the first half of the year.
    val first_half = sc.textFile("E:\\spark\\Employee_salary_first_half.csv")
    // Drop the CSV header: only partition 0 contains the file's first line.
    // (Original discarded the result of it.drop(1) and returned the full
    //  iterator, so the header row was never actually removed.)
    val drop_first = first_half.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it
    )
    // Column 2 = employee name, column 7 = actual salary (0-based indices 1 and 6).
    // toLongOption yields None for malformed salary fields instead of throwing.
    val split_first = drop_first.map { line =>
      val fields = line.split(",")
      (fields(1), fields(6).toLongOption)
    }
    // Sort by salary descending (None orders below any Some) and take the top 3.
    val sort_first = split_first.sortBy(_._2, ascending = false).take(3)

    // Output results.
    sort_first.foreach(println)

    // Stop Spark exactly once, after all jobs have run.
    // (Original called sc.stop() mid-way, which would make the subsequent
    //  textFile job fail on a stopped SparkContext.)
    sc.stop()
  }
}

