package com.hdaccp.ch10

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import scala.collection.mutable.ListBuffer

object TopNCourseByDateJob {

  /**
   * Batch job: reads cleaned access logs (parquet), counts accesses per
   * (fangdate, coursecode) pair, and persists the aggregation to the
   * database through [[MyLogByDateDao]].
   *
   * @param args optional; args(0) overrides the default input path
   *             (defaults to the original hard-coded location)
   */
  def main(args: Array[String]): Unit = {
    // Allow the input location to be supplied on the command line; fall back
    // to the original hard-coded path for backward compatibility.
    val inputPath = args.headOption.getOrElse("F:\\accp教学\\sparkresources\\cleanlog1")

    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("TopNCourseByDateJob")
      .getOrCreate()

    // Ensure the SparkSession is stopped even if the job fails partway.
    try {
      // Previously-cleaned log records stored as parquet.
      val df = spark.read.parquet(inputPath)

      // Access count per course per date. The groupBy/agg already projects
      // only these columns, so no preliminary select is needed.
      val counted = df
        .groupBy("fangdate", "coursecode")
        .agg(count("coursecode").as("times"))

      // Write the aggregation to the database: buffer each partition's rows
      // and issue one batched DAO insert per partition to limit connection
      // churn on the executors.
      counted.foreachPartition { rows =>
        val buffer = new ListBuffer[MyLogByDateState]
        rows.foreach { row =>
          val fangdate   = row.getAs[String]("fangdate")
          val coursecode = row.getAs[String]("coursecode")
          val times      = row.getAs[Long]("times")
          buffer.append(MyLogByDateState(fangdate, coursecode, times))
        }
        // Skip the DAO round-trip entirely for empty partitions.
        if (buffer.nonEmpty) {
          MyLogByDateDao.insertMyLogByDate(buffer)
        }
      }
    } finally {
      spark.stop()
    }
  }
}
