package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Spark batch job: find every student whose total score is above the
  * grade-wide average, and write the result as "id,name,class,totalScore".
  *
  * Steps:
  *   1. compute each student's total score from the score table
  *   2. compute the grade average of those totals
  *   3. keep students whose total is strictly above the average
  *   4. join with the student table to attach name and class
  */
object Demo20Student {
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("student")
      .setMaster("local[4]")

    val sc = new SparkContext(conf)

    // Ensure the context is stopped even if the job throws,
    // otherwise the local executor threads/UI are leaked.
    try {
      // 1. read the score file; each line is expected to be
      //    "studentId,courseId,score" — TODO confirm against the data file
      val scores: RDD[String] = sc.textFile("spark/data/score.txt")

      // parse into (studentId, score) key-value pairs
      val kvScore: RDD[(String, Double)] = scores.map(line => {
        val split: Array[String] = line.split(",")
        (split(0), split(2).toDouble)
      })

      // total score per student.
      // Cached here (rather than caching `scores`) because this shuffled
      // result feeds three separate jobs below — sum(), count(), and the
      // join that is materialized by saveAsTextFile — and without caching
      // the reduceByKey shuffle would be recomputed for each of them.
      val sumScore: RDD[(String, Double)] = kvScore.reduceByKey(_ + _)
      sumScore.cache()

      // grade average of the per-student totals
      // (Double division: yields NaN if the input file is empty)
      val avgScore: Double = sumScore.map(_._2).sum() / sumScore.count()

      println(s"年级平均分:$avgScore")

      // keep only students whose total is strictly above the average
      val filterScore: RDD[(String, Double)] = sumScore.filter(_._2 > avgScore)

      // 2. read the student table; columns 0, 1 and 4 are used,
      //    presumably "id,name,...,...,class" — TODO confirm schema
      val students: RDD[String] = sc.textFile("spark/data/students.txt")

      // parse into (id, (name, class)) pairs for the join
      val kvStudent: RDD[(String, (String, String))] = students.map(line => {
        val split: Array[String] = line.split(",")
        val id: String = split(0)
        val name: String = split(1)
        val clazz: String = split(4)

        (id, (name, clazz))
      })

      // inner join: only ids present in both tables survive
      val joinRDD: RDD[(String, (Double, (String, String)))] = filterScore.join(kvStudent)

      // flatten each joined record into the output line format
      val resultRDD: RDD[String] = joinRDD.map {
        case (id, (total, (name, clazz))) =>
          s"$id,$name,$clazz,$total"
      }

      // NOTE(review): saveAsTextFile fails if the output directory
      // already exists — delete "spark/data/sumScore" before re-running.
      resultRDD.saveAsTextFile("spark/data/sumScore")
    } finally {
      sc.stop()
    }
  }

}
