package test

import org.apache.spark.sql.SparkSession

import scala.math.random
object c1 {

  /**
   * Demo Spark job run in local mode:
   *   1. estimates Pi via Monte Carlo sampling,
   *   2. exercises common RDD transformations (map / filter / sortBy / flatMap),
   *   3. reads an employee-salary CSV and prints the top-3 earners.
   *
   * @param args optional first argument: number of partitions ("slices") for
   *             the Pi estimation (defaults to 2)
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .getOrCreate()

    // --- Monte Carlo estimate of Pi ---
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x * x + y * y <= 1) 1 else 0 // inside the unit circle?
    }.reduce(_ + _)
    println(s"Pi is roughly ${4.0 * count / (n - 1)}")

    // --- Common transformations: map (transform), filter, sortBy ---
    val sc = spark.sparkContext
    val listRDD = sc.makeRDD(1 to 10, 1)
    val mapRDD = listRDD
      .map(x => x * 2)      // double each element
      .filter(y => y > 8)   // keep only values greater than 8
      .sortBy(z => z, true) // ascending sort
    // collect() first so the sorted values print on the driver in order;
    // RDD.foreach runs on executors, which loses ordering and, in cluster
    // mode, prints to executor logs instead of the driver console.
    mapRDD.collect().foreach(println)

    // --- flatMap: flatten nested arrays into a single RDD of elements ---
    val list = sc.parallelize(List(Array(1, 2, 3), Array(4, 5, 6)))
    val mapInfo = list.flatMap(tp => tp)
    mapInfo.collect().foreach(println)

    // --- Exercise: top-3 employees by actual salary, first half-year ---
    val first_half = sc.textFile("C:/Users/Administrator/Desktop/Employee_salary_first_half.csv")
    // Drop the CSV header (the first line of partition 0 only).
    // Bug fix: the original called `it.drop(1)` and then returned `it`;
    // per the Iterator contract, an iterator must not be reused after
    // calling drop on it. Return the dropped iterator for partition 0
    // and the untouched iterator for every other partition.
    val drop_first = first_half.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it
    )

    // Split each line; keep column 2 (employee name) and column 7
    // (actual salary). toLongOption yields None for malformed salary
    // cells instead of throwing NumberFormatException.
    val split_first = drop_first.map(line => {
      val data = line.split(",")
      (data(1), data(6).toLongOption)
    })
    // Sort descending by salary (under the default Option ordering,
    // None sorts below any Some, so malformed rows end up last).
    val sort_first = split_first.sortBy(x => x._2, false)
    sort_first.take(3).foreach(println)

    spark.stop()
  }
}
