package cn.sheep.dmp.report

import java.util.Properties

import com.typesafe.config.ConfigFactory
import org.apache.commons.io.FileUtils
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * 地域数据分布统计
  * Sheep.Old @ 64341393
  * Created 2018/3/28
  */
object AreaAnalysis2Json {

    /**
      * Entry point: reads the ad-log parquet data (path taken from config key
      * `parquet.path`), aggregates the request count per province/city via
      * Spark SQL, and writes the result to `outputPath` as JSON.
      */
    def main(args: Array[String]): Unit = {

        // Typesafe config — supplies the parquet input path.
        val load = ConfigFactory.load()

        val sparkConf = new SparkConf().setAppName("地域数据分布统计")
          .setMaster("local[*]")
          .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

        val sc = new SparkContext(sparkConf)
        val sqlc = new SQLContext(sc)

        try {
            // Load the source data.
            val dataFrame = sqlc.read.parquet(load.getString("parquet.path"))

            // Expose the DataFrame to Spark SQL.
            dataFrame.registerTempTable("logs")

            // Aggregate: one row per (province, city) with its record count.
            val result = sqlc.sql(
                """
                  |select provincename, cityname, count(*) cnt
                  |from logs group by provincename, cityname
                """.stripMargin)

            // Single definition of the output location (the original repeated
            // this literal in two places, inviting the two copies to drift).
            val outputPath = "F:\\dmp\\report_area"

            // SaveMode.Overwrite makes Spark delete any pre-existing output
            // directory itself, replacing the manual FileSystem exists/delete
            // dance the original performed.
            result.coalesce(4).write.mode(SaveMode.Overwrite).json(outputPath)
        } finally {
            // Ensure the SparkContext is released even if the job fails.
            sc.stop()
        }
    }

}
