package test

import org.apache.spark.sql.SparkSession

object dd {

  /**
   * Spark RDD practice job.
   *
   * Part 1 demonstrates basic multi-RDD operators (intersection, union,
   * subtract, zip, cartesian) on small in-memory lists.
   * Exercise 1 reads two half-year salary CSV files and prints the distinct
   * names of employees whose salary exceeds 200,000 in either half.
   * Exercise 2 computes each employee's average monthly salary and writes a
   * combined CSV (name, first-half salary, second-half salary, raw salary,
   * monthly average).
   *
   * @param args optional: args(0) overrides the input/output directory
   *             (defaults to the original hard-coded desktop path, so the
   *             change is backward compatible).
   */
  def main(args: Array[String]): Unit = {
    // Generalized: the data directory can now come from the command line.
    val baseDir = if (args.nonEmpty) args(0) else "C:/Users/Administrator/Desktop"

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext

    // Demo RDDs; a single partition keeps each list's data together.
    val list1 = sc.makeRDD(List(60, 70, 80, 90, 78), 1)
    val list2 = sc.parallelize(List(66, 75, 85, 78, 92), 1)
    val list3 = sc.parallelize(List(66, 75, 85, 78, 92), 1)

    // 1. Intersection
    list1.intersection(list2).foreach(println)
    // Union (keeps duplicates)
    list1.union(list2).foreach(println)
    // Difference (elements of list1 not present in list2)
    list1.subtract(list2).foreach(println)
    // Zip (positional pairing; element and partition counts must match)
    list3.zip(list2).foreach(println)
    // Cartesian product
    list3.cartesian(list1).foreach(println)
    // Print the source data
    list1.foreach(println)

    // Exercise 1: employees earning over 200,000 in either half of the year.
    val firstHalf = sc.textFile(s"$baseDir/Employee_salary_first_half.csv")
    val secondHalf = sc.textFile(s"$baseDir/Employee_salary_second_half.csv")

    // Drop the CSV header: only the first line of partition 0 is a header.
    val dropFirst = firstHalf.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it)
    val dropSecond = secondHalf.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it)

    // Extract (name, salary): column 1 is the name, column 6 the salary.
    val splitFirst = dropFirst.map { line =>
      val data = line.split(",")
      (data(1), data(6).toInt)
    }
    val splitSecond = dropSecond.map { line =>
      val data = line.split(",")
      (data(1), data(6).toInt)
    }

    val filterFirst = splitFirst.filter(_._2 > 200000).map(_._1)
    val filterSecond = splitSecond.filter(_._2 > 200000).map(_._1)
    val names = filterFirst.union(filterSecond).distinct()
    names.collect().foreach(println)

    // Exercise 2: per-employee average monthly salary.
    val salary = splitFirst.union(splitSecond)
    // FIX: the original combineByKey never incremented its record counter
    // (it seeded the count with 0 and merged with `acc._2 + 0`), so the
    // count stayed 0 forever. Seed with 1 and add 1 per record instead.
    val sumCount = salary.combineByKey(
      (s: Int) => (s, 1),
      (acc: (Int, Int), s: Int) => (acc._1 + s, acc._2 + 1),
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
    )
    // Annual total divided by 12 months — assumes the two halves together
    // cover a full year (as in the original code).
    val avgSalary = sumCount.map(x => (x._1, x._2._1.toDouble / 12))
    avgSalary.foreach(println)

    // FIX: join the computed average (matching the variable's intent) rather
    // than the raw (sum, count) accumulator tuple the original joined.
    val total = splitFirst.join(splitSecond).join(salary).join(avgSalary).map(
      x => Array(x._1, x._2._1._1._1, x._2._1._1._2, x._2._1._2, x._2._2).mkString(","))
    total.repartition(1).saveAsTextFile(s"$baseDir/save")
    spark.stop()
  }

}

