package com.zhao.biz.report.report1_provincecity

import com.typesafe.config.Config
import com.zhao.dao.ProvinceCityDaoImpl
import com.zhao.entity.{Log, ProvinceAndCityInfo}
import com.zhao.utils.CommonUtil
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import java.util.Properties

/**
 * Description: report job that aggregates request counts per (province, city) pair. <br/>
 * Copyright (c) 2021, Zhao <br/>
 * A wet person does not fear the rain. <br/>
 * Date: 2021/1/13 10:44
 *
 * @author 柒柒
 * @version 1.0
 */

object CalPerProvinceCity {

  /**
   * Entry point: reads the cleaned log data (parquet), aggregates request
   * counts per (province, city) pair, and emits the result (JSON / MySQL /
   * console, depending on which persistence path is enabled below).
   *
   * @param args unused; input/output paths are currently hard-coded for local runs
   */
  def main(args: Array[String]): Unit = {

    // Input/output paths (hard-coded for local runs; could be taken from args).
    val input = "a_data/outputpath"
    val output = "a_data/result/json"

    // 1. Build the SparkSession.
    // NOTE: Kryo classes must be registered BEFORE the SparkContext is created.
    // The original code called sc.getConf.registerKryoClasses(...) after
    // getOrCreate(), which has no effect on a running context — so the
    // registration is done here via spark.kryo.classesToRegister instead.
    val spark: SparkSession = SparkSession
      .builder
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      // Use Kryo serialization (faster and more compact than Java serialization).
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Register the Log class with Kryo up front.
      .config("spark.kryo.classesToRegister", classOf[Log].getName)
      // Write parquet files with snappy compression.
      .config("spark.sql.parquet.compression.codec", "snappy")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // 2. Read the cleaned data from HDFS once and reuse it below
    //    (the original re-read the same parquet input twice).
    val logDF: DataFrame = spark.read.parquet(input)
    logDF.createOrReplaceTempView("tb_log")

    // Aggregate request counts per (province, city) with Spark SQL.
    val resultDF: DataFrame = spark.sql(
      """
        |select provincename,
        |       cityname,
        |       count(1) cnt
        |from tb_log
        |group by provincename,cityname
        |""".stripMargin)

    // --- Persistence ---
    // Option 1: write as JSON on disk.
//    resultDF.repartition(1).write
//      .mode(SaveMode.Overwrite)
//      .json(output)

    // Option 2: write to MySQL. (Real projects usually go through a JDBC/DAO
    // layer rather than Spark's built-in DataFrame jdbc API.)
    // Option 2a: DataFrame jdbc writer (not recommended).
//    val load: Config = CommonUtil.load
//
//    // JDBC connection properties for MySQL.
//    val prop: Properties = new Properties()
//    prop.setProperty("user",load.getString("db.default.user"))
//    prop.setProperty("password",load.getString("db.default.password"))
//    resultDF.write
//      .mode(SaveMode.Append)
//      .jdbc(load.getString("db.default.url"),load.getString("db.default.tbname"),prop)

    // Option 2b: DAO layer via a JDBC framework (better maintainability).
    // One DAO per partition so the connection is not serialized to executors.
//    resultDF.rdd
//      .foreachPartition(itr =>{
//        if (!itr.isEmpty){
//          val dao: ProvinceCityDaoImpl = new ProvinceCityDaoImpl
//          itr.foreach(row =>{
//            val provinceName = row.getAs[String]("provincename")
//            val cityName = row.getAs[String]("cityname")
//            val cnt = row.getAs[Long]("cnt")
//            dao.save(new ProvinceAndCityInfo(provinceName,cityName,cnt.toInt))
//          })
//        }
//      })

    // Option 3: the same aggregation with Spark core operators, reusing logDF
    // instead of re-reading the parquet input. foreach(println) prints on the
    // executors — visible locally only because master is local[*].
    logDF.rdd
      .map { row =>
        val provinceName = row.getAs[String]("provincename")
        val cityName = row.getAs[String]("cityname")
        ((provinceName, cityName), 1)
      }
      .reduceByKey(_ + _)
      .foreach(println)

    // Release cluster resources (the original never stopped the session).
    spark.stop()
  }
}


















