package org.example

import org.apache.spark.sql.SparkSession


object data2_score {
  def main(args: Array[String]): Unit = {
    // Spark execution environment (local mode, all cores).
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    val sc = spark.sparkContext

    // Multi-RDD operation examples (kept for reference, commented out).
    val data1 = sc.parallelize(List(60, 90, 75, 80, 75), 1)
    val data2 = sc.makeRDD(List(72, 80, 75, 66, 95), 1)
    // Intersection:
    //data1.intersection(data2).foreach(println)
    // Union:
    //data1.union(data2).foreach(println)
    // Subtraction (elements in data1 but not in data2):
    //data1.subtract(data2).foreach(println)
    // Zip into (key, value) tuples — both RDDs must have the same
    // number of partitions and elements per partition:
    //data1.zip(data2).foreach(println)
    // Cartesian product — every pairing, 5*5 = 25 tuples:
    //data1.cartesian(data2).foreach(println)
    // Filter:
    //data1.filter(_>=90).foreach(println)
    // Deduplicate:
    //data1.distinct().foreach(println)

    val first_half = sc.textFile("E:\\ab06\\spark\\Employee_salary_first_half.csv")
    val second_half = sc.textFile("E:\\ab06\\spark\\Employee_salary_second_half.csv")

    // Drops the CSV header line (first row of partition 0).
    // FIX: the original discarded the result of `it.drop(1)` and returned
    // `it` unconditionally; after `drop`, the source Iterator's state is
    // undefined, so the header was not reliably removed. Use if/else.
    def dropHeader(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[String] =
      rdd.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

    val drop_first = dropHeader(first_half)
    // FIX: originally read from `first_half` (copy-paste bug), so the
    // second-half file was never actually processed.
    val drop_second = dropHeader(second_half)

    // Parses a CSV line into (name, salary); a non-numeric salary field
    // falls back to 0. Column 1 = name, column 6 = salary — assumed from
    // the original code; confirm against the CSV schema.
    def parseSalary(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[(String, Int)] =
      rdd.map { line =>
        val data = line.split(",")
        try (data(1), data(6).toInt)
        catch { case _: NumberFormatException => (data(1), 0) }
      }

    val split_first = parseSalary(drop_first)
    val split_second = parseSalary(drop_second)

    // Distinct names of employees earning more than 200000 in either half.
    val filter_first = split_first.filter(x => x._2 > 200000).map(x => x._1)
    // FIX: originally filtered `split_first` again (copy-paste bug), so
    // second-half high earners were never included.
    val filter_second = split_second.filter(x => x._2 > 200000).map(x => x._1)
    val name = filter_first.union(filter_second).distinct()
    name.collect.foreach(println)

    // All (name, salary) records from both halves.
    val salary = split_first.union(split_second)
    // Per-employee (salarySum, recordCount).
    // FIX: the counter slot was never incremented (created as 0 and merged
    // with `+ 0`), so it was always 0. Count records properly; downstream
    // output is unchanged because only the sum (_1) is consumed.
    val avg_salary = salary.combineByKey(
      (v: Int) => (v, 1),
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      (acc1: (Int, Int), acc2: (Int, Int)) => (acc1._1 + acc2._1, acc1._2 + acc2._2)
    )
    // Average monthly salary: yearly total / 12 (kept as in the original;
    // the record count acc._2 is now available if a per-record average is
    // wanted instead).
    avg_salary.map(x => (x._1, x._2._1.toDouble / 12)).foreach(println)
    // avg_salary.map(x => (x._1, x._2._1.toDouble / 12)).collect

    // NOTE(review): joining against `salary` (the union of both halves)
    // produces one output row per (first, second, union-record) combination
    // for each name — kept as in the original pipeline; verify this
    // fan-out is intended.
    val total = split_first.join(split_second).join(salary).join(avg_salary).map(
      x => Array(x._1, x._2._1._1._1, x._2._1._1._2,
        x._2._1._2, x._2._2).mkString(","))
    total.repartition(1).saveAsTextFile("E:\\ab06\\spark\\save")
    //total.toDF().write.csv("E:\\ab06\\spark\\save")

    sc.stop()
  }
}
