package org.yonggan.dmp.report

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import scalikejdbc.config.DBs
import scalikejdbc.{DB, SQL}

/**
  * Per-(province, city) record-count report, implemented with RDD operators
  * (as opposed to a DataFrame/SQL implementation).
  */
object RptProCityCountRDD {

  /**
    * Entry point: reads a parquet dataset, counts records per
    * (province, city) pair, and inserts the aggregated counts into
    * the `dmp.dmp_pro_city` MySQL table via scalikejdbc.
    *
    * @param args optional; args(0) overrides the input parquet path
    *             (defaults to the original hard-coded local path)
    */
  def main(args: Array[String]): Unit = {

    // Input path is parameterized; falls back to the original default
    // so existing invocations keep working unchanged.
    val inputPath = if (args.nonEmpty) args(0) else "D:\\project\\5"

    val conf = new SparkConf()
      .setAppName("统计各省市数据量分布情况")
      .setMaster("local[*]") // NOTE(review): hard-coded master — local runs only
    val sc = new SparkContext(conf)

    // Create the SQLContext (pre-2.0 Spark API, matching the rest of the file).
    val sqlContext = new SQLContext(sc)

    // Read the parquet source, then drop to the RDD API.
    val baseDf = sqlContext.read.parquet(inputPath)
    val rowRDD: RDD[Row] = baseDf.rdd

    // ((provincename, cityname), count)
    val result: RDD[((String, String), Int)] = rowRDD.map(row => {
      ((row.getAs[String]("provincename"), row.getAs[String]("cityname")), 1)
    }).reduceByKey(_ + _)

    // Alternative output strategies kept for reference:
    // 1) convert to a DataFrame and write JSON
    // 2) convert to a case class and serialize with Gson:
    //    result.map(tp => new Gson().toJson(RptProCity(tp._1._1, tp._1._2, tp._2)))
    //          .saveAsTextFile("D:\\project\\7")

    // Write to MySQL. The original code opened one transaction PER RECORD;
    // here we open ONE transaction per partition, which is far faster and
    // makes each partition's writes atomic.
    result.foreachPartition(partition => {
      // Must run on the executor, not the driver: the connection pool is
      // not serializable. setup() is safe to call repeatedly.
      DBs.setup()
      DB.localTx { implicit session =>
        partition.foreach { case ((province, city), cnt) =>
          // Parameterized SQL — values are bound, never concatenated.
          SQL(
            """
              |INSERT INTO `dmp`.`dmp_pro_city` (`provincename`, `cityname`, `cnt`)
              |VALUES (?, ?, ?)
            """.stripMargin).bind(province, city, cnt).update().apply()
        }
      }
      // NOTE(review): DBs.close() is deliberately NOT called here — partitions
      // from the same JVM may still be using the shared pool. Verify pool
      // lifecycle if executors leak connections.
    })

    sc.stop()
  }

}
