package Spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

class Tests {

  // NOTE: JUnit 4 creates a fresh instance of the test class for every test
  // method, so each test owns its own SparkContext; calling sc.stop() at the
  // end of each test is therefore safe (and required to release the context
  // before the next test constructs a new one).
  val conf = new SparkConf().setMaster("local[6]").setAppName("test")
  val sc   = new SparkContext(conf)

  // Input files: tab-separated records, one per line: id \t name \t score.
  private val MathPath    = "src/main/scala/Book/data/result_math.txt"
  private val BigdataPath = "src/main/scala/Book/data/result_bigdata.txt"

  /**
   * Reads a tab-separated score file and parses every line into an
   * (id, name, score) triple.
   *
   * @param path path of the text file to read
   * @return RDD of (student id, student name, score)
   */
  private def loadScores(path: String): RDD[(String, String, Int)] =
    sc.textFile(path).map { item =>
      val fields = item.split("\t")
      (fields(0), fields(1), fields(2).toInt)
    }

  /**
   * Top 5 students of each subject.
   */
  @Test
  def test01(): Unit = {
    val mathScores    = loadScores(MathPath)
    val bigdataScores = loadScores(BigdataPath)

    // Sort each subject by score (descending) and keep the top five.
    // BUG FIX: the original assigned the Unit result of `foreach` to a val
    // and then printed it, which printed "()" instead of the records.
    val top5Math    = mathScores.sortBy(_._3, ascending = false).take(5)
    val top5Bigdata = bigdataScores.sortBy(_._3, ascending = false).take(5)

    println(top5Math.mkString(", "))
    println(top5Bigdata.mkString(", "))

    sc.stop()
  }

  /**
   * Prints the IDs of students who scored 100 in a single subject:
   *   1. collect the IDs of 100-score students per subject
   *   2. union both sets and deduplicate
   */
  @Test
  def test02(): Unit = {
    val mathScores    = loadScores(MathPath)
    val bigdataScores = loadScores(BigdataPath)

    // Keep only perfect scores, then project down to the student ID.
    val math100    = mathScores.filter(_._3 == 100).map(_._1)
    val bigdata100 = bigdataScores.filter(_._3 == 100).map(_._1)

    // Merge both subjects and drop duplicate IDs before collecting.
    val perfectIds = math100.union(bigdata100).distinct().collect()
    perfectIds.foreach(println(_))

    sc.stop()
  }

  /**
   * Prints the total score of every student across both subjects.
   */
  @Test
  def test03(): Unit = {
    val mathScores    = loadScores(MathPath)
    val bigdataScores = loadScores(BigdataPath)

    // Merge both score sheets and keep only (id, score) pairs.
    val allScores = mathScores.union(bigdataScores).map(item => (item._1, item._3))

    // reduceByKey sums the scores that share the same student ID.
    val totals = allScores.reduceByKey(_ + _)

    totals.collect().foreach(println(_))

    sc.stop()
  }

  /**
   * Prints the average score of every student across both subjects,
   * computed with combineByKey (sum, count) accumulators.
   */
  @Test
  def test04(): Unit = {
    val mathScores    = loadScores(MathPath)
    val bigdataScores = loadScores(BigdataPath)

    // Merge both subjects into (id, score) pairs.
    val all: RDD[(String, Int)] =
      mathScores.union(bigdataScores).map(item => (item._1, item._3))

    // Accumulate (sum of scores, number of scores) per student:
    //   createCombiner: first score seen for a key -> (score, 1)
    //   mergeValue:     fold another score into a partition-local accumulator
    //   mergeCombiners: combine accumulators from different partitions
    val sumAndCount: RDD[(String, (Int, Int))] = all.combineByKey(
      (score: Int) => (score, 1),
      (acc: (Int, Int), score: Int) => (acc._1 + score, acc._2 + 1),
      (left: (Int, Int), right: (Int, Int)) => (left._1 + right._1, left._2 + right._2)
    )

    // average = sum / count (as Double to keep the fractional part).
    val averages = sumAndCount
      .map { case (id, (sum, count)) => (id, sum.toDouble / count) }
      .collect()

    averages.foreach(println(_))

    sc.stop()
  }
}
