package cn.pengpeng.dmp.report

import cn.pengpeng.dmp.beans.OffLineReport.ReportLogDataAnalysis
import cn.pengpeng.dmp.utils.ConfigHandler
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * Computes the distribution of log records across provinces and cities.
  * Approach #2: Spark core (RDD) API.
  */
object LogDataAnalysisCore {

  /**
    * Entry point: reads parquet log data, counts records per
    * (province, city) pair, and writes the aggregated report to the RDBMS
    * configured in [[ConfigHandler]].
    *
    * @param args optional; args(0) overrides the input parquet path
    *             (defaults to the original local path for compatibility)
    */
  def main(args: Array[String]): Unit = {
    // Input path may be supplied as the first CLI argument; falls back to
    // the original hard-coded local path so existing invocations still work.
    val inputPath = args.headOption.getOrElse("d:\\data\\spark\\out1")

    val conf = new SparkConf()
      .setAppName("LogDataAnalysisCore")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()

    // Ensure the SparkSession is always released, even if any stage below
    // throws (the original code leaked the session on failure).
    try {
      // Read the parquet log data produced by the previous ETL step.
      val parquet: DataFrame = spark.read.parquet(inputPath)

      // Map each row to ((province, city), 1) and sum the counts per key.
      val result: RDD[((String, String), Int)] = parquet.rdd
        .map { row =>
          val pname = row.getAs[String]("provincename")
          val cname = row.getAs[String]("cityname")
          ((pname, cname), 1)
        }
        .reduceByKey(_ + _)

      import spark.implicits._
      val dataFrame: DataFrame = result
        .map { case ((province, city), cnt) =>
          ReportLogDataAnalysis(province, city, cnt)
        }
        .toDF()

      // Alternative sink: JSON on disk.
      //dataFrame.write.mode(SaveMode.Overwrite).json(ConfigHandler.logdataAnalysisResultJsonPath)

      // Persist the aggregated report to the database.
      // NOTE: the default SaveMode is ErrorIfExists — the job fails if the
      // target table already exists; add .mode(SaveMode.Overwrite) if reruns
      // should replace it.
      dataFrame.write.jdbc(ConfigHandler.url, ConfigHandler.logdataAnalysis_table, ConfigHandler.dbProps)
    } finally {
      spark.stop()
    }
  }

}
