package com.shujia.homework

import org.apache.spark.{Partitioner, SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object CourseTop10 {

  /**
   * Homework driver: loads student info and score records with the intent of
   * computing the top-10 scorers per course. The actual ranking logic exists
   * only as two commented-out alternative approaches below, so as written the
   * score pipeline defines transformations but triggers no Spark action.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("LogDataCompute")
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always released, even if a job fails.
    try {
      // students.txt line format: id,field1,field2,field3,field4
      // -> keyed by student id for the (commented-out) join further down.
      // NOTE(review): this path uses the "scala_code" prefix while score.txt
      // below uses "spark_code" — one of them is probably a typo; confirm
      // against the actual data layout before fixing.
      val stuInfoRDD: RDD[(String, (String, String, String, String))] = sc
        .textFile("scala_code/data/students.txt")
        .map { oneLine =>
          val fields: Array[String] = oneLine.split(",")
          (fields(0), (fields(1), fields(2), fields(3), fields(4)))
        }

      // score.txt line format: studentId,courseId,score
      // NOTE: .toInt assumes the score column is always a valid integer;
      // a malformed line would fail the task with NumberFormatException.
      sc
        .textFile("spark_code/data/score.txt", 4)
        .map { oneLine =>
          val fields: Array[String] = oneLine.split(",")
          (fields(0), fields(1), fields(2).toInt)
        }
        // When each group holds a lot of data, how should it be handled?
        // Approach 1: sort the whole dataset first, then repartition so each
        // course lands in its own partition, then rank within each partition.
//      .sortBy(x => (x._2, -x._3))
//      .groupBy(_._2, new Partitioner {
//        override def numPartitions: Int = 9
//        override def getPartition(key: Any): Int = {
//          key.toString match {
//            case "1000001" => 0
//            case "1000002" => 1
//            case "1000003" => 2
//            case "1000004" => 3
//            case "1000005" => 4
//            case "1000006" => 5
//            case "1000007" => 6
//            case "1000008" => 7
//            case "1000009" => 8
//          }
//        }
//      })
//      .flatMap(_._2)
//      .mapPartitions {
//        case iter => {
//          var pm = 0
//          //          iter.take(10)
//          iter.map(x => {
//            pm += 1
//            (x._1, x._2, x._3, pm)
//          })
//        }
//      }
//      .foreach(println)
      // Approach 2: compute each group's average score, filter to the rows
      // above the average, then sort the (now smaller) group via toList.

    //      .mapValues {
    //        case iter => {
    //          iter.toList.sortBy(-_._3).take(10)
    //        }
    //      }
    //      .flatMap(_._2)
    //      .map(x => (x._1, (x._2, x._3)))
    //      .join(stuInfoRDD)
    //      .sortBy(x => (x._2._1._1, -x._2._1._2))
    //      .foreach(println)

    } finally {
      // Release Spark resources; the original code leaked the context.
      sc.stop()
    }
  }
}
