package com.hdaccp.ch08

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import scala.collection.mutable.ListBuffer

/**
 * Reads cleaned music play records (Parquet), aggregates play counts per
 * song, and writes the per-song totals to MySQL through `MusicDao`.
 *
 * Entry point: `main` — side effects only (Spark job + DB inserts).
 */
object Demo3 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      // NOTE(review): appName says "ch08Demo1App" although the object is
      // Demo3 — confirm this is intentional (likely copy-paste).
      .appName("ch08Demo1App")
      .getOrCreate()

    // Load the cleaned records produced by the earlier cleaning step.
    val df = spark.read.format("parquet").load("F:\\accp教学\\sparkresources\\clean12")

    // Per-song play count via the DataFrame API directly — no temp-view
    // registration needed. The former redundant select("song") before
    // groupBy("song") has been dropped; groupBy already projects the key.
    val dfs = df.groupBy("song").agg(count("song").as("times"))

    // Write results to MySQL. Rows are buffered per partition so MusicDao
    // receives one batched insert call per partition instead of one per row.
    dfs.foreachPartition { rows =>
      val batch = new ListBuffer[Mu2]
      rows.foreach { row =>
        // vals, not vars — these locals are never reassigned.
        val song = row.getAs[String]("song")
        val times = row.getAs[Long]("times")
        batch.append(Mu2(song, times))
      }
      // Skip the DB round-trip entirely for empty partitions.
      if (batch.nonEmpty) MusicDao.insertMu2(batch)
    }
    spark.stop()
  }
}
