import org.apache.spark.sql.SparkSession

object qq {
  /** Salary-analysis batch job over two half-year CSV exports.
   *
   *  Reads `Employee_salary_first_half.csv` and `Employee_salary_second_half.csv`,
   *  then:
   *   1) prints the distinct names of employees whose salary exceeded 200,000
   *      in either half of the year, and
   *   2) computes each employee's average monthly salary for 2020 and saves
   *      one CSV line per employee: name, first-half salary, second-half
   *      salary, yearly total, monthly average.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("Spark Pi")
      .getOrCreate()
    val sc = spark.sparkContext

    val firstHalf = sc.textFile("D:\\pp\\Employee_salary_first_half.csv")
    val secondHalf = sc.textFile("D:\\pp\\Employee_salary_second_half.csv")

    // Skip the CSV header line; it can only live in partition 0 of a text file.
    def dropHeader(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[String] =
      rdd.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

    // Parse each data line into (employeeName, salary): column 1 is the name,
    // column 6 the salary.
    // NOTE(review): split(",") breaks on quoted fields containing commas —
    // assumes the CSV has none. TODO confirm against the input files.
    def parse(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[(String, Int)] =
      dropHeader(rdd).map { line =>
        val cols = line.split(",")
        (cols(1), cols(6).toInt)
      }

    val splitFirst = parse(firstHalf)
    val splitSecond = parse(secondHalf)

    // Names of employees earning more than 200,000 in either half, deduplicated.
    val highEarners = splitFirst.filter(_._2 > 200000).keys
      .union(splitSecond.filter(_._2 > 200000).keys)
      .distinct()
    highEarners.collect().foreach(println)

    // Yearly total per employee. The original combineByKey carried a second
    // tuple element that was always 0 (`acc._2 + 0`) — this is just a sum.
    val yearlyTotal = splitFirst.union(splitSecond).reduceByKey(_ + _)

    // Average monthly salary: yearly total spread over 12 months.
    val avgSalary = yearlyTotal.mapValues(_.toDouble / 12)
    // Collect before printing: executor-side println is invisible in cluster mode.
    avgSalary.collect().foreach(println)

    // One output row per employee: name, first-half, second-half, yearly total,
    // monthly average. The original joined against the un-aggregated union
    // (duplicating every row) and against the raw combineByKey result (writing
    // a "(sum,0)" tuple instead of the average).
    val total = splitFirst.join(splitSecond).join(yearlyTotal).join(avgSalary).map {
      case (name, (((firstSalary, secondSalary), totalSalary), avg)) =>
        Seq(name, firstSalary, secondSalary, totalSalary, avg).mkString(",")
    }
    total.repartition(1).saveAsTextFile("C:\\Users\\Administrator\\Desktop\\save")

    // Release the SparkSession (the original never stopped it).
    spark.stop()
  }
}