import org.apache.spark.sql.SparkSession

object Groupbykey1 {

  /**
   * Sums employee salaries across two half-year CSV extracts.
   *
   * Each CSV is expected to have a single header row and at least 7
   * comma-separated columns, with the grouping key in column 1 and an
   * integer salary in column 6 (0-based). The two files are unioned and
   * salaries are summed per key with `reduceByKey`; the result is printed.
   *
   * @param args optional path overrides: args(0) = first-half CSV,
   *             args(1) = second-half CSV. Defaults preserve the original
   *             hard-coded locations for backward compatibility.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("RDDPartitionExample")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext

    // Allow input paths on the command line; fall back to the original paths.
    val firstPath  = if (args.length > 0) args(0) else "C:\\Users\\PC-608\\Desktop\\Employee_salary_first_half.csv"
    val secondPath = if (args.length > 1) args(1) else "C:\\Users\\PC-608\\Desktop\\Employee_salary_second_half.csv"

    try {
      val first  = sc.textFile(firstPath)
      val second = sc.textFile(secondPath)

      // Drop the header line, which lives at the start of partition 0.
      // BUG FIX: the original called `it.drop(1)`, discarded the returned
      // iterator, and then returned the *original* `it`. Using an Iterator
      // after invoking a method on it is undefined behaviour in Scala —
      // the dropped iterator itself must be returned.
      def dropHeader(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[String] =
        rdd.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

      // Parse (key, salary) pairs from columns 1 and 6.
      // NOTE(review): split(",") does not handle quoted fields containing
      // commas, and `toInt` throws on malformed cells — assumes clean input.
      def toSalaryPairs(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[(String, Int)] =
        dropHeader(rdd).map { line =>
          val cols = line.split(",")
          (cols(1), cols(6).toInt)
        }

      val allSalaries = toSalaryPairs(first).union(toSalaryPairs(second))
      val totals      = allSalaries.reduceByKey(_ + _)
      println(totals.collect().mkString(","))
    } finally {
      // BUG FIX: the original never released the SparkSession.
      spark.stop()
    }
  }
}
