package com.hdaccp.ch12

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import scala.collection.mutable.ListBuffer

/**
  * Reads cleaned course-access logs (parquet), counts accesses per course type,
  * and persists the totals to MySQL. Expected aggregate output:
  *
  * +----------+-----+
  * |coursetype|times|
  * +----------+-----+
  * |    course| 1274|
  * |     class| 7679|
  * |     learn| 1247|
  * +----------+-----+
  */
object LogCourseJob {

  /**
    * Entry point: builds a local SparkSession, loads the cleaned log parquet,
    * aggregates access counts per `coursetype`, and writes each partition's
    * results to MySQL via [[CoursetypetimesDao]].
    */
  def main(args: Array[String]): Unit = {
    // 1) Spark session — local[2] is intended for development runs;
    //    override master via spark-submit for cluster deployment.
    val spark = SparkSession.builder()
      .appName("LogCourseJobApp")
      .master("local[2]")
      .getOrCreate()

    try {
      // 2) Load the pre-cleaned log data (parquet output of the cleaning job).
      val df = spark.read.format("parquet").load("F:\\accp教学\\sparkresources\\cleanlog5_1")

      // 3) Count occurrences per course type.
      //    Equivalent SQL: select coursetype, count(1) as times from courselog group by coursetype
      val df2 = df.groupBy("coursetype").agg(count("coursetype").as("times"))

      // 4) Write the aggregated counts to MySQL, batching one insert per partition
      //    so each executor opens at most one DB connection per partition.
      df2.foreachPartition(partition => {
        val batch = new ListBuffer[Coursetypetimes]
        partition.foreach { row =>
          // Typed accessors: coursetype is a String column, count() yields Long.
          val coursetype = row.getAs[String]("coursetype")
          val times = row.getAs[Long]("times")
          batch.append(Coursetypetimes(coursetype, times))
        }
        // Skip the DAO round-trip entirely for empty partitions.
        if (batch.nonEmpty) {
          CoursetypetimesDao.insertCoursetypetimes(batch)
        }
      })
    } finally {
      // Always release Spark resources, even if the job fails.
      spark.stop()
    }
  }
}
