package org.example
import org.apache.spark.sql.SparkSession
import org.apache.spark.rdd.RDD
object sparkBaseYun1 {

  /** Drops the first line (the CSV header) from a text-file RDD.
    * textFile places the file's first line in partition 0, so dropping
    * one element from that partition's iterator removes exactly the header.
    */
  private def dropHeader(lines: RDD[String]): RDD[String] =
    lines.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

  /** Parses one CSV row into (employeeName, salary), reading columns 1 and 6.
    * NOTE(review): assumes every data row has at least 7 comma-separated
    * fields and an integer in column 6 — TODO confirm against the CSVs.
    */
  private def parseRow(line: String): (String, Int) = {
    val cols = line.split(",")
    (cols(1), cols(6).toInt)
  }

  /** Entry point.
    *
    * 1. Runs a small in-memory RDD transformation demo.
    * 2. Reads two half-year employee-salary CSVs, drops their headers,
    *    and extracts (name, salary) pairs.
    * 3. Prints the distinct names of employees with any salary > 200000.
    * 4. Computes each employee's (sum, count) and average salary, prints
    *    the averages, and writes a joined summary file to disk.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    val sc = spark.sparkContext

    // Demo pipeline: add 2 to every element, sort ascending, keep evens,
    // take the first match (take returns a local Array, printed on the driver).
    val rdd: RDD[Int] = sc.makeRDD(List(1, 4, 3, 2, 5), 1)
    val mapRDD = rdd.map(_ + 2).sortBy(tp => tp).filter(_ % 2 == 0).take(1)
    mapRDD.foreach(println)

    // ---- earlier experiments kept for reference ----------------------------
//    val first_half = sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_first_half.csv")
//
//    val rdd1 = sc.parallelize(List(1, 2, 3, 4), 2)
//    val rdd2 = sc.parallelize(List(3, 4, 5, 6), 2)
//    val interRDD = rdd1.intersection(rdd2)
//    interRDD.foreach(System.out.println)
//
//    val unRDD = rdd1.union(rdd2)
//    unRDD.collect().foreach(System.out.println)
//    val suRDD = rdd2.subtract(rdd1)
//    suRDD.foreach(System.out.println)
//    val score = sc.parallelize(List(('a', 60), ('b', 70), ('c', 80), ('a', 60)))
//    //score.foreach(_._2 >= 80).collect().foreach(println)
//    score.distinct().collect().foreach(println)
//
//    val data1 = sc.parallelize(List(('a', 60), ('b', 70), ('c', 80), ('a', 60)))
//    val data2 = sc.makeRDD(List(('a', 6), ('b', 7), ('c', 8), ('d', 5)))
////    data1.reduceByKey((x,y) => x+y).foreach(println)
////    data1.groupByKey().foreach(println)
////    data1.groupBy(_._1).foreach(println)
////    data1.join(data2).foreach(println)
//    data1.leftOuterJoin(data2).foreach(println)
//    data1.rightOuterJoin(data2).foreach(println)
//
//    data1.combineByKey(
//      v => (v,1),
//      (t:(Int,Int),v) => {
//        (t._1+v,t._2+1)
//    },
//      (t1:(Int,Int),t2:(Int,Int)) => {
//        (t1._1+t2._1,t1._2+t2._2)
//      }
//    ).foreach(println)
//
//    data1.aggregateByKey(0)(math.max(_,_),_+_).foreach(println)
    // ------------------------------------------------------------------------

    val firstHalf  = dropHeader(sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_first_half.csv"))
    val secondHalf = dropHeader(sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_second_half.csv"))

    val splitFirst  = firstHalf.map(parseRow)
    val splitSecond = secondHalf.map(parseRow)

    // Distinct names of employees with any salary record above 200,000
    // in either half of the year.
    val highEarners = splitFirst.filter(_._2 > 200000).map(_._1)
      .union(splitSecond.filter(_._2 > 200000).map(_._1))
      .distinct()
    highEarners.collect().foreach(println)

    val salary = splitFirst.union(splitSecond)

    // (sum, count) per employee.
    // BUG FIX: the original seeded the count with 0 and accumulated `+ 0`,
    // so the count was always 0, and the average below divided by a
    // hard-coded 12. The count is now tracked for real and the average
    // divides by it — equivalent to /12 when an employee has all 12
    // monthly records, and correct when some are missing (TODO confirm
    // that one record per month is the intended schema).
    val avgSalary = salary.combineByKey(
      (v: Int) => (v, 1),
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
    )
    avgSalary
      .map { case (name, (sum, cnt)) => (name, sum.toDouble / cnt) }
      .foreach(println)

    // Joined summary: name, first-half salary, second-half salary,
    // one combined salary record, and the (sum, count) pair.
    val total = splitFirst.join(splitSecond).join(salary).join(avgSalary).map(
      x => Array(x._1, x._2._1._1._1, x._2._1._1._2, x._2._1._2, x._2._2).mkString(",")
    )
    total.repartition(1).saveAsTextFile("C:\\Users\\Administrator\\Desktop\\save")

    // BUG FIX: release the session — the original never stopped Spark.
    spark.stop()
  }
}
