package com.qingguo.homework

import org.apache.spark.{SparkConf, SparkContext}

object student1 {

  /**
    * Reports the per-subject scores of the grade's top-10 students
    * (ranked by total score across all subjects).
    *
    * Output columns (tab-separated): studentId, name, class, subjectName,
    * subjectScore, totalScore — sorted by student name.
    *
    * Expected top-10 ids on the exercise data set:
    * 1500100929, 1500100080, 1500100308, 1500100873, 1500100418,
    * 1500100258, 1500100875, 1500100930, 1500100596, 1500100834
    */
  def main(args: Array[String]): Unit = {

    // Set up the Spark job (local mode for this exercise).
    val conf = new SparkConf().setAppName("Student1").setMaster("local")
    val sc = new SparkContext(conf)

    // score.txt lines: studentId,subjectId,score
    val score = sc.textFile("Spark/data/score.txt")
    // Cached because it is scanned twice: once for totals, once per subject.
    score.cache()

    // (studentId, subjectScore) pairs, then total score per student.
    val scoreRDD = score.map(line => {
      val fields = line.split(",")
      (fields(0), fields(2).toInt)
    })
    val sumScoresRDD = scoreRDD.reduceByKey(_ + _)

    // True top 10 by total score. (The previous version hard-coded a
    // `>= 586` cutoff, which silently produces wrong results on any
    // other data set; `top` ranks properly regardless of the data.)
    val top10RDD = sc.parallelize(sumScoresRDD.top(10)(Ordering.by(_._2)))

    // students.txt lines: id,name,...,class at index 4
    // Keep (name, class) as a tuple — string-joining with "-" would break
    // for any value that itself contains a dash.
    val students = sc.textFile("Spark/data/students.txt")
    val studentsKVRDD = students.map(line => {
      val fields = line.split(",")
      (fields(0), (fields(1), fields(4)))
    })

    // Per-subject rows keyed by student id: (id, (subjectId, subjectScore))
    val scoreKVRDD = score.map(line => {
      val fields = line.split(",")
      (fields(0), (fields(1), fields(2).toInt))
    })

    // Restrict per-subject rows to the top-10 students, attaching the total:
    // (id, (totalScore, (subjectId, subjectScore)))
    val topSubjectsRDD = top10RDD.join(scoreKVRDD)

    // Attach name and class, then re-key by subject id so the course
    // name can be joined on: (subjectId, (id, name, class, score, total))
    val bySubjectRDD = topSubjectsRDD
      .join(studentsKVRDD)
      .map { case (id, ((total, (kmbh, subjectScore)), (name, clazz))) =>
        (kmbh, (id, name, clazz, subjectScore, total))
      }

    // cource.txt lines: subjectId,subjectName
    val cource = sc.textFile("Spark/data/cource.txt")
    val courceKVRDD = cource.map(line => {
      val fields = line.split(",")
      (fields(0), fields(1))
    })

    // Final rows, tab-separated: id, name, class, subjectName, score, total.
    val resultRDD = courceKVRDD.join(bySubjectRDD).map {
      case (_, (kmm, (id, name, clazz, subjectScore, total))) =>
        id + "\t" + name + "\t" + clazz + "\t" + kmm + "\t" + subjectScore + "\t" + total
    }

    // collect() first: foreach on an RDD runs on the executors with no
    // ordering guarantee, which would defeat the sortBy.
    resultRDD.sortBy(_.split("\t")(1)).collect().foreach(println)

    sc.stop()
  }
}