package com.hdaccp.ch07
import com.hdaccp.log.StatDAO
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import scala.collection.mutable.ListBuffer
/**
 * Spark batch job: reads cleaned parquet data, counts occurrences of each
 * (types, zd) pair, and bulk-inserts the aggregated rows into the stats DB
 * via [[StatDAO.insertMu1]], one batch per partition.
 *
 * Usage: an optional first CLI argument overrides the default input path.
 */
object Demo4 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("ch07Demo4App") // fixed copy-paste from Demo3: name now matches this object
      .getOrCreate()

    // Input path: first CLI arg if given, otherwise the original hard-coded default.
    val inputPath = args.headOption.getOrElse("F:\\accp教学\\sparkresources\\clean6")

    try {
      val df = spark.read.format("parquet").load(inputPath)

      // Occurrence count per (types, zd) pair. The former select("types","zd")
      // was redundant: groupBy + agg already restricts output to these columns.
      val df2 = df.groupBy("types", "zd").agg(count("types").as("times"))

      // Buffer each partition locally, then write it to the DB in one batch
      // so we open at most one DB round-trip per partition.
      df2.foreachPartition(rows => {
        val buffer = new ListBuffer[Mu1]
        rows.foreach(row => {
          val types = row.getAs[String]("types")
          val zd    = row.getAs[String]("zd")
          // count() produces LongType, so read it directly instead of
          // round-tripping through toString.toLong.
          val times = row.getAs[Long]("times")
          buffer.append(Mu1(types, zd, times))
        })
        // Skip the DB call entirely for empty partitions.
        if (buffer.nonEmpty) {
          StatDAO.insertMu1(buffer)
        }
      })
    } finally {
      // Release the SparkSession even if the job fails.
      spark.stop()
    }
  }
}
