//import org.apache.spark.sql.SparkSession
object MapContext {

  import scala.io.Source
  import scala.util.Try

  /**
   * Computes the top-`n` earners from raw CSV lines.
   *
   * Expected layout (per the original commented-out Spark pipeline):
   * the first line is a header row; in each data row, column 1 holds the
   * employee name and column 6 holds the integer salary.
   *
   * Rows that cannot be parsed (too few columns, non-numeric salary) are
   * silently skipped instead of being mapped to a sentinel value — this
   * replaces the original broad `catch { case e: Exception => ... }` that
   * produced a dead ("Invalid Data", 0) tuple.
   *
   * @param lines raw CSV lines, header included
   * @param n     how many top earners to return (default 3, matching the original)
   * @return up to `n` (name, salary) pairs sorted by salary, descending
   */
  def topSalaries(lines: Seq[String], n: Int = 3): Seq[(String, Int)] =
    lines
      .drop(1) // 2. remove the header row (was zipWithIndex().filter(_._2 > 0))
      .flatMap { line =>
        val cols = line.split(",")
        // 3. extract name (col 1) and salary (col 6); drop malformed rows.
        //    Try covers both IndexOutOfBounds (short row) and NumberFormatException.
        for {
          name   <- Try(cols(1).trim).toOption
          salary <- Try(cols(6).trim.toInt).toOption
        } yield (name, salary)
      }
      .sortBy(-_._2) // 4. sort by salary, descending
      .take(n)       // 5. keep the top n

  /**
   * Entry point: reads the salary CSV and prints the top-3 earners.
   *
   * The CSV path may be supplied as the first command-line argument;
   * otherwise the original hard-coded Windows path is used.
   */
  def main(args: Array[String]): Unit = {
    // 1. read the CSV file (standard library replaces the Spark textFile)
    val path   = args.headOption.getOrElse("D:\\Employee_salary_first_half.csv")
    val source = Source.fromFile(path)
    try {
      val top3 = topSalaries(source.getLines().toSeq)

      // Print the results (output strings kept identical to the original).
      println("上半年实际薪资排名前3的员工：")
      top3.foreach { case (name, salary) =>
        println(s"$name: ¥${"%,d".format(salary)}")
      }
    } finally source.close() // ensure the file handle is released even on failure
  }
}
