package cn.dmp.report

import com.typesafe.config.ConfigFactory
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

object ProAnalyseSQL {

    /**
     * Spark batch job: counts ad-log records per (provincename, cityname)
     * and writes the aggregate out as JSON.
     *
     * Usage: ProAnalyseSQL &lt;dataInputPath&gt; &lt;outPutPath&gt;
     *   dataInputPath - directory of parquet log files to read
     *   outPutPath    - output directory for the JSON result
     *                   (deleted first if it already exists)
     */
    def main(args: Array[String]): Unit = {

        // Validate arguments: exactly an input path and an output path.
        if (args.length != 2) {
            println(
                """
                  |cn.dmp.report.ProAnalyseSQL
                  |参数：<dataInputPath> <outPutPath>
                """.stripMargin)
            sys.exit()
        }

        // Destructure the two positional arguments.
        val Array(dataInputPath, outPutPath) = args

        // Job configuration. setIfMissing keeps "local[*]" as a convenient
        // development default while still letting a --master flag passed to
        // spark-submit take precedence on a real cluster.
        val sparkConf = new SparkConf()
        sparkConf.setAppName(s"${this.getClass.getSimpleName}")
        sparkConf.setIfMissing("spark.master", "local[*]")

        val sc = new SparkContext(sparkConf)
        // Ensure the context is always stopped, even if the job fails.
        try {
            val sQLContext = new SQLContext(sc)

            // Input is parquet (produced by an upstream ETL step).
            val dataFrame = sQLContext.read.parquet(dataInputPath)

            // Delete the output directory if it already exists, otherwise
            // the write below fails with "path already exists".
            val fileSystem = FileSystem.get(sc.hadoopConfiguration)
            val outputDir = new Path(outPutPath)
            if (fileSystem.exists(outputDir)) {
                fileSystem.delete(outputDir, true)
            }

            // Register the logs as a SQL view, aggregate counts per
            // province/city, and write the result as JSON in 4 partitions.
            dataFrame.registerTempTable("logs")
            sQLContext.sql(
                """
                  |select
                  |provincename, cityname, count(*) ct
                  |from logs
                  |group by provincename, cityname
                """.stripMargin).coalesce(4).write.json(outPutPath)
        } finally {
            sc.stop()
        }
    }

}
