package com.itcast.report

import com.itcast.utils.{ConfigHandler, FileHandler, MysqlHandler}
import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

// Reports the distribution of log records across provinces and cities
// (implemented with the Spark SQL API).
object LogDataAnalysis {

  /** Entry point: loads the parquet-formatted log data, counts records per
    * (province, city) pair with a Spark SQL aggregation, and persists the
    * resulting report to MySQL. Paths and connection details come from
    * `ConfigHandler`.
    */
  def main(args: Array[String]): Unit = {
    // Local-mode Spark configuration; Kryo serialization for efficiency.
    val sparkConf = new SparkConf()
      .setAppName("LogDataAnalysis")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sparkContext)

    // Load the pre-processed log data (written earlier as parquet).
    val rawDataFrame = sqlContext.read.parquet(ConfigHandler.parquetPath)

    // Register the DataFrame as a temporary table so it can be queried
    // with plain SQL.
    rawDataFrame.registerTempTable("log")

    // Count records grouped by province and city.
    // NOTE: '|' margin markers added so stripMargin actually strips the
    // leading indentation — without them the call was a no-op.
    val result = sqlContext.sql(
      """select count(*) ct, provincename, cityname
        |from log
        |group by provincename, cityname""".stripMargin)

    // Remove any previous output directory so the job is safely re-runnable.
    FileHandler.deleteWillOutputDir(sparkContext, ConfigHandler.rptPath)

    // Persist the aggregated report to MySQL.
    MysqlHandler.save2db(result, ConfigHandler.table)

    sparkContext.stop()
  }
}
