package read.niit.service


import org.apache.spark.streaming.dstream.DStream
import read.niit.bean.Reader
import read.niit.dao.{GradeBookDao, Top4WithGradeBookDao}
import read.niit.util.SparkUtil

/**
 * Streaming analysis service: per micro-batch, finds the 4 grades with the
 * most reading activity and upserts the results through [[Top4WithGradeBookDao]].
 */
class Top4WithGradeBookService {

  // NOTE(review): not read inside this class — kept because takeSpark() may
  // perform required session initialization as a side effect; confirm before removing.
  val spark = SparkUtil.takeSpark()
  // Driver-side DAO; only used after collecting results to the driver (see below).
  private val bookDao = new Top4WithGradeBookDao

  /**
   * Entry point for all analyses on the reader event stream.
   * Currently runs only the top-4 active-grade statistic.
   *
   * @param reader stream of reading events
   */
  def dataAnalysis(reader: DStream[Reader]): Unit = {
    gradeBookTop4(reader)
  }

  // Requirement: top 4 grades by reading activity.
  // (The original comment said "top10" but the code has always taken 4;
  //  an earlier per-record GradeBookDao implementation was removed as dead code.)

  /**
   * Counts events per grade in each micro-batch, keeps the 4 largest counts,
   * prints them, and upserts each (grade, count) pair via the DAO.
   *
   * @param reader stream of reading events
   */
  private def gradeBookTop4(reader: DStream[Reader]): Unit = {
    // 1. (grade_id, 1) for every read event
    val mapDS = reader.map(data => (data.grade_id, 1))
    // 2. (grade_id, total count within this batch)
    val reduceData = mapDS.reduceByKey(_ + _)
    // 3. select the 4 largest counts and persist them
    reduceData.foreachRDD(rdd => {
      // top() returns the 4 largest in descending order, like
      // sortBy(_._2, ascending = false).take(4), but without a full
      // shuffle-sort: each partition keeps only its local best 4.
      val top4: Array[(String, Int)] =
        rdd.top(4)(Ordering.by[(String, Int), Int](_._2))
      println("----Top4看书活跃年级----")
      top4.foreach(println)
      // top() collects to the driver, so using the driver-side bookDao here
      // is safe — this loop does not run inside an executor closure.
      top4.foreach { case (grade, count) =>
        // upsert: update when the grade already has a row, insert otherwise
        if (bookDao.selectGradeById(grade)) {
          bookDao.updateGradeInfo(grade, count)
        } else {
          bookDao.insertGradeInfo(grade, count)
        }
      }
    })
  }
}


