package com.hdaccp.ch11

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import scala.collection.mutable.ListBuffer

/**
  * Counts cleaned log records per course type and writes the result to MySQL.
  *
  * Sample output as rendered by `show()` (right-aligned):
  * {{{
  * +----------+-----+
  * |coursetype|times|
  * +----------+-----+
  * |    course| 1274|
  * |     class| 7679|
  * |     learn| 1247|
  * +----------+-----+
  * }}}
  *
  * Same data as rendered by `show(false)` (left-aligned, untruncated):
  * {{{
  * +----------+-----+
  * |coursetype|times|
  * +----------+-----+
  * |course    |1274 |
  * |class     |7679 |
  * |learn     |1247 |
  * +----------+-----+
  * }}}
  */
object MyLogTopByCouseTypeJob {

  /**
    * Entry point: reads the cleaned-log parquet data, counts rows per
    * course type, orders the counts ascending, and persists the result
    * to MySQL through [[MyLogCoureTypeDao]].
    *
    * @param args optional; the first argument overrides the default
    *             input parquet path (backward compatible — with no
    *             args the original hard-coded path is used)
    */
  def main(args: Array[String]): Unit = {
    // Allow the input location to be supplied on the command line.
    val inputPath = args.headOption.getOrElse("F:\\accp教学\\sparkresources\\cleanlog5")

    val spark = SparkSession.builder()
      .appName("TypeJobApp")
      .master("local[2]")
      .getOrCreate()

    // Ensure the SparkSession is released even if the job fails partway.
    try {
      val df = spark.read.format("parquet").load(inputPath)

      // Occurrences of each course type, smallest counts first.
      val df2 = df.select("coursetype")
        .groupBy("coursetype")
        .agg(count("coursetype").as("times"))
      val df3 = df2.orderBy(df2.col("times").asc)

      // Equivalent SQL formulation, kept for reference:
      // df.createOrReplaceTempView("mylog")
      // df.sqlContext.sql("select coursetype,count(1) as times from mylog group by coursetype").show(false)

      // Write the aggregated result to MySQL, one DAO batch per partition
      // so a single connection handles each partition's rows.
      df3.foreachPartition(p => {
        val list = new ListBuffer[MyLogCourseType]
        for (elem <- p) {
          val courseType = elem.getAs("coursetype").toString
          // `times` comes from count(), which always produces a Long —
          // read it directly instead of round-tripping through String.
          val times = elem.getAs[Long]("times")
          list.append(MyLogCourseType(courseType, times))
        }
        MyLogCoureTypeDao.insertMyLogByCourseType(list)
      })
    } finally {
      spark.stop()
    }
  }
}
