package cn.sheep.dolphin.report

import cn.sheep.dolphin.common.DolphinAppComm
import cn.sheep.dolphin.config.DolphinConfig
import com.google.gson.Gson
import org.apache.spark.sql.{SQLContext, SaveMode}

/**
  * 统计各省市数据分布情况的 - Core
  * author: old sheep
  * QQ: 64341393 
  * Created 2018/11/29
  */
object ProvinceCityAnalysisCore {

	/**
	  * Entry point: reads bid-log data from parquet, counts records per
	  * (day, province, city), then persists the aggregation both to MySQL
	  * and as JSON text files.
	  *
	  * @param args Array(parquetInputPath, jsonOutputPath)
	  */
	def main(args: Array[String]): Unit = {

		// Validate CLI arguments before touching Spark.
		if (args.length != 2) {
			println(
				"""
				  |Usage: cn.sheep.dolphin.report.ProvinceCityAnalysisCore
				  |参数：<parquetInputPath> <jsonOutputPath>
				""".stripMargin)
			sys.exit(-1)
		}

		// Destructure the two expected arguments.
		val Array(parquetInputPath, jsonOutputPath) = args

		// SparkContext configured by the shared application helper.
		val sc = DolphinAppComm.createSparkContext("统计各省市数据分布情况的")

		// SQLContext is required to read the parquet input (Spark 1.x API).
		val sqlc = new SQLContext(sc)
		val dataFrame = sqlc.read.parquet(parquetInputPath)

		// Count records per (day, province, city).
		// NOTE(review): assumes "requestdate" is non-null and at least 10 chars
		// (a yyyy-MM-dd prefix) — confirm upstream ETL guarantees this.
		val result = dataFrame.map(row => {
			// day: first 10 chars of the request date (yyyy-MM-dd)
			val day = row.getAs[String]("requestdate").substring(0, 10)
			// province
			val province = row.getAs[String]("provincename")
			// city
			val city = row.getAs[String]("cityname")

			((day, province, city), 1)
		}).reduceByKey(_ + _)


		// Persist to MySQL: overwrite table r_procity_analysis with fresh results.
		import sqlc.implicits._
		val resultDF = result
			.map { case ((day, province, city), count) => (day, province, city, count) }
			.toDF("day", "province", "city", "cnt")

		resultDF.write.mode(SaveMode.Overwrite).jdbc(DolphinConfig._url, "r_procity_analysis", DolphinConfig.props)


		// Persist as JSON text files.
		// Fix: build one Gson per PARTITION via mapPartitions instead of one per
		// record — Gson is not serializable, so it must be instantiated on the
		// executors, and per-record construction was pure overhead.
		result.mapPartitions { iter =>
			val gson = new Gson()
			iter.map { case ((day, province, city), count) =>
				gson.toJson(RProcityAnalysis(day, province, city, count))
			}
		}.saveAsTextFile(jsonOutputPath)

		sc.stop()
	}

}

/** Result row for the province/city distribution report.
  *
  * Marked `final`: case classes should not be extended.
  *
  * @param day      report day, formatted yyyy-MM-dd
  * @param province province name
  * @param city     city name
  * @param cnt      number of records for this (day, province, city) key
  */
final case class RProcityAnalysis(day: String, province: String, city: String, cnt: Int)
