package org.example

import org.apache.spark.api.java.JavaRDD.fromRDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
object data1_core3 {

  /**
   * Demonstrates Spark pair-RDD operations (reduceByKey, groupByKey, joins,
   * combineByKey, aggregateByKey) on small in-memory data, then processes two
   * half-year employee-salary CSV files: prints the names of employees earning
   * over 200,000 and each employee's average monthly salary.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("SaveTocCSV")
      .getOrCreate()
    val sc = spark.sparkContext

    // A pair RDD: every element is a (key, value) tuple.
    val data1 = sc.makeRDD(List(("张三", 2000), ("李四", 2500), ("王五", 5000), ("张三", 3600)))
    val data2 = sc.makeRDD(List(("张三", 3000), ("赵四", 3500), ("王五", 6000), ("赵六", 4600)))

    // Sum the values of identical keys.
    data1.reduceByKey(_ + _).foreach(println)
    // Collect the values of identical keys into one sequence.
    data1.groupByKey().foreach(println)
    // Inner join: only keys present in both RDDs are paired.
    data1.join(data2).foreach(println)
    // Outer joins keep every key of the driving side; the value from the
    // other side is wrapped in Option (None when the key is absent there).
    data1.leftOuterJoin(data2).foreach(println)
    data1.rightOuterJoin(data2).foreach(println)

    // combineByKey: accumulate a per-key (sum, count) pair.
    data1.combineByKey(
      // createCombiner: seed the accumulator with the first value of a key.
      v => (v, 1),
      // mergeValue: fold one more value into a partition-local accumulator.
      (acc: (Int, Int), v) => (acc._1 + v, acc._2 + 1),
      // mergeCombiners: merge accumulators built on different partitions.
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
    ).foreach(println)

    // aggregateByKey: per-partition max for each key, then summed across partitions.
    data1.aggregateByKey(0)(math.max(_, _), _ + _).foreach(println)

    val firstHalf = sc.textFile("G:\\Employee_salary_first_half.csv")
    val secondHalf = sc.textFile("G:\\Employee_salary_second_half.csv")

    // Skip the CSV header: only partition index 0 contains the file's first
    // line, so drop exactly one line there and pass other partitions through.
    val dropFirst = firstHalf.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)
    val dropSecond = secondHalf.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

    // Split each row on commas and keep (employee name = column 2,
    // salary = column 7 parsed as Int).
    val splitFirst = dropFirst.map { line =>
      val cols = line.split(",")
      (cols(1), cols(6).toInt)
    }
    val splitSecond = dropSecond.map { line =>
      val cols = line.split(",")
      (cols(1), cols(6).toInt)
    }

    // Names of employees with a salary above 200,000 in either half,
    // de-duplicated across the two files.
    val filterFirst = splitFirst.filter(_._2 > 200000).map(_._1)
    val filterSecond = splitSecond.filter(_._2 > 200000).map(_._1)
    val names = filterFirst.union(filterSecond).distinct()
    names.collect().foreach(println) // collect to the driver and print one name per line

    // Average monthly salary per employee across the whole year.
    val salary = splitFirst.union(splitSecond)
    // Accumulate (totalSalary, recordCount) per employee.
    // FIX: the original never incremented the count (seed was (v, 0) and the
    // merge added 0), leaving the count permanently 0. It is counted properly
    // now; the average below still divides by 12 months, so printed averages
    // are unchanged.
    val avgSalary = salary.combineByKey(
      (v: Int) => (v, 1),
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2))
    avgSalary.map { case (name, (sum, _)) => (name, sum.toDouble / 12) }.foreach(println)

    // One CSV-style line per employee combining both halves, the union, and
    // the (sum, count) accumulator. NOTE: this RDD is lazy and no action is
    // ever applied (the saveAsTextFile below is commented out), so it never
    // actually executes.
    val total = splitFirst.join(splitSecond).join(salary).join(avgSalary).map {
      case (name, (((first, second), sal), avg)) =>
        Array(name, first, second, sal, avg).mkString(",")
    }

    // Round-trip an existing CSV through the DataFrame API.
    // NOTE(review): "F:/save1/part-00000" is not produced by this program; it
    // must already exist on disk or spark.read will fail.
    val df = spark.read
      .option("header", "true") // first line of the file is a header row
      .option("delimiter", ",") // values are comma-separated
      .csv("F:/save1/part-00000")
    df.write
      .option("header", "true") // emit the header row as well
      .format("csv")
      // NOTE(review): "utputsave" looks like a typo for "outputsave" — kept
      // as-is to preserve the original output location; confirm intent.
      .save("F:\\utputsave")

    // total.repartition(1).saveAsTextFile("F:/savetotal1.csv")

    sc.stop()
  }

}
