package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo: joining two key-value RDDs and aggregating per key.
  *
  * Reads a student table and a score table, joins them on student id,
  * sums each student's scores, and prints the totals in descending order.
  */
object Demo7Join {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local") // run locally
      .setAppName("Demo7Join") // fixed: was "Demo4Sample", a copy-paste leftover

    val sc = new SparkContext(conf)

    // Read the student table and the score table
    val studentRDD = sc.textFile("data/student.txt")
    val scoreRDD = sc.textFile("data/score.txt")

    // The join operator requires both RDDs to be key-value RDDs,
    // so key both datasets by student id (the first CSV column).
    val studentKV = studentRDD.map(line => (line.split(",")(0), line))
    val scoreKV = scoreRDD.map(line => (line.split(",")(0), line))

    val joinRDD = studentKV
      .join(scoreKV)

    /**
      * Sample joined rows:
      *
      * (1500100980,(1500100980,霍谷槐,22,女,理科一班, 1500100980,1000001,33))
      * (1500100980,(1500100980,霍谷槐,22,女,理科一班, 1500100980,1000002,113))
      * (1500100980,(1500100980,霍谷槐,22,女,理科一班, 1500100980,1000003,54))
      * (1500100980,(1500100980,霍谷槐,22,女,理科一班, 1500100980,1000007,12))
      * (1500100980,(1500100980,霍谷槐,22,女,理科一班, 1500100980,1000008,65))
      * (1500100980,(1500100980,霍谷槐,22,女,理科一班, 1500100980,1000009,29))
      */

    // Extract (studentId, score) pairs; the score is the third column of the
    // score row. trim guards against whitespace-padded fields (visible in the
    // sample rows above), which would otherwise make toInt throw.
    val kvRDD = joinRDD.map { case (sId, (_, scoreLine)) =>
      val score = scoreLine.split(",")(2).trim.toInt
      (sId, score)
    }

    /**
      * Sample (studentId, score) pairs:
      *
      * (1500100668,146)
      * (1500100668,49)
      * (1500100668,53)
      * (1500100668,78)
      * (1500100668,25)
      * (1500100668,22)
      */

    // NOTE: groupByKey + sum shown here only for comparison; it shuffles every
    // score across the network. Prefer reduceByKey (below), which combines
    // map-side before shuffling. This chain is lazy and never executed
    // (the foreach action is commented out).
    kvRDD.groupByKey()
      .map { case (sId, scores) => s"$sId\t${scores.sum}" }
      //.foreach(println)

    // Sum each student's scores
    val sumRDD = kvRDD
      .reduceByKey(_ + _)

    // Sort by total score in descending order, then format for output
    val sortRDD = sumRDD
      .sortBy(_._2, ascending = false)
      .map { case (sId, total) => s"$sId\t$total" }

    sortRDD.foreach(println)

    // Release the SparkContext (was missing; leaks the context otherwise)
    sc.stop()
  }

}
