package homework

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo2Work {

  /**
   * Homework 2: find students whose total score is greater than the
   * grade-wide average total score.
   *
   * Input : `/shujia/student/score.txt` — CSV lines of `studentId,courseId,score`
   * Output: `/shujia/score_avg` — one `(studentId, totalScore)` pair per line
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    /**
     * 1. Build the Spark connection. The master is expected to be supplied
     *    by spark-submit; uncomment setMaster for local debugging.
     */
    val conf = new SparkConf()
    conf.setAppName("homeWork2")
//    conf.setMaster("local")
    val sc = new SparkContext(conf)

    try {
      /**
       * 2. Read the score file and parse each line, dropping malformed
       *    rows that do not have exactly three comma-separated fields.
       */
      val scores: RDD[(String, String, Double)] = sc.textFile("/shujia/student/score.txt")
        .map(_.split(","))
        .filter(_.length == 3)
        .map { case Array(sid, cid, sco) => (sid, cid, sco.toDouble) }

      /**
       * 3. Total score per student.
       */
      val totalByStudent: RDD[(String, Double)] = scores
        .map { case (sid, _, sco) => (sid, sco) }
        .reduceByKey(_ + _)

      // Cached because it feeds two separate jobs: the average and the filter.
      totalByStudent.cache()

      /**
       * 4. Grade-wide average of the per-student totals.
       *    mean() replaces the original reduceByKey-on-a-constant-key +
       *    take(1) construction and avoids the unsafe .head on a possibly
       *    empty array (an empty input now yields NaN, so the filter below
       *    simply produces an empty result instead of crashing).
       */
      val avgScore: Double = totalByStudent.values.mean()

      /**
       * 5. Keep students whose total exceeds the average
       *    (e.g. 372.5605605605606 on the sample data).
       */
      val result: RDD[(String, Double)] = totalByStudent
        .filter { case (_, total) => total > avgScore }

      result.saveAsTextFile("/shujia/score_avg")
    } finally {
      // Always release cluster resources, even if the job fails.
      sc.stop()
    }
  }

}
