package org.example
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions
object test3 {
  /**
   * Reads two half-year employee salary CSVs, then:
   *   1. prints the distinct names of employees earning > 200,000 in either half,
   *   2. prints each employee's average salary across all of their records,
   *   3. writes a joined summary file of (name, firstHalfSalary, secondHalfSalary,
   *      unionSalary, averageSalary) rows.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    val firstHalf = sc.textFile("E:\\a\\Employee_salary_first_half.csv")
    val secondHalf = sc.textFile("E:\\a\\Employee_salary_second_half.csv")

    // Drop the CSV header row: textFile has no header handling, and the
    // header is always the first line of partition 0.
    val dropFirst = firstHalf.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it
    )
    val dropSecond = secondHalf.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it
    )

    // Column 1 = employee name, column 6 = salary
    // (assumed layout inherited from the original code — TODO confirm against the CSVs).
    val splitFirst = dropFirst.map { line =>
      val cols = line.split(",")
      (cols(1), cols(6).toInt)
    }
    val splitSecond = dropSecond.map { line =>
      val cols = line.split(",")
      (cols(1), cols(6).toInt)
    }

    // Distinct names of employees who exceeded 200,000 in either half.
    val highFirst = splitFirst.filter(_._2 > 200000).map(_._1)
    val highSecond = splitSecond.filter(_._2 > 200000).map(_._1)
    val names = highFirst.union(highSecond).distinct()
    names.collect().foreach(println)

    // Per-employee average over ALL of that employee's records.
    val salary = splitFirst.union(splitSecond)
    val avgSalary = salary.combineByKey(
      (s: Int) => (s, 1),                                    // BUGFIX: counter must start at 1, not 0
      (acc: (Int, Int), s: Int) => (acc._1 + s, acc._2 + 1), // BUGFIX: count each merged record (was `acc._2 + 0`)
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
    ).mapValues { case (sum, n) =>
      // BUGFIX: divide by the real record count instead of a hard-coded 12,
      // which was wrong for any employee without exactly 12 records.
      sum.toDouble / n
    }

    // collect() first so output goes to the driver's stdout; a bare
    // RDD.foreach(println) prints inside executor JVMs.
    avgSalary.collect().foreach(println)

    // BUGFIX: join the computed average (a Double) rather than the raw
    // combiner tuple, so the last output column is the average salary and
    // not a stringified "(sum,0)" pair.
    val total = splitFirst.join(splitSecond).join(salary).join(avgSalary).map { x =>
      Array(x._1, x._2._1._1._1, x._2._1._1._2, x._2._1._2, x._2._2).mkString(",")
    }
    total.repartition(1).saveAsTextFile("C:\\Users\\Administrator\\Desktop\\111")

    // Release Spark resources before exiting.
    spark.stop()
  }
}
