package com.bigdata.author

import com.bigdata.topN.Constants
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

object AuthorDistrict {

  /**
   * Batch job: loads the stored user table for the current generation
   * timestamp, counts users per address (descending), and persists the
   * result through [[AuthorDTO.insertAuthorDistrict]].
   *
   * Fix: the original never called `session.stop()`, leaking the Spark
   * session's resources; the job body is now wrapped in try/finally.
   */
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder().appName("authorDis").master("local").getOrCreate()
    try {
      // Register the user snapshot for this generation as a temp view.
      session.sqlContext.read
        .load(s"${Constants.HADOOP_STORAGE}${Constants.TIME_GENERATOR}-${Constants.TABLE_USER}")
        .createOrReplaceTempView("user")

      // Aggregate: number of users per address, most populous first.
      val addressDF =
        session.sql("select address,count(*) as count from user group by address order by count desc")
      addressDF.show()

      // Insert per partition so each executor batches its rows into one call.
      addressDF.foreachPartition(partition => {
        val list = new ListBuffer[Address]
        partition.foreach(info => {
          val address = info.getAs[String]("address")
          val count = info.getAs[Long]("count")
          list.append(Address(address, count, Constants.TIME_GENERATOR))
        })
        AuthorDTO.insertAuthorDistrict(list)
      })
    } finally {
      // Always release Spark resources, even when the job throws.
      session.stop()
    }
  }

}
