package cn.dmp.service
import java.util.Properties
import cn.dmp.util.AppParams
import com.alibaba.fastjson.JSONObject
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext, SaveMode}

/**
  * 3.2.0 Compute the distribution of record counts across provinces and cities.
  */
object FlowDistribution {

  /**
    * Aggregates record counts per (province, city) pair from the input DataFrame
    * and persists the result three ways:
    *   1. as a single local JSON file,
    *   2. into a MySQL table via JDBC (connection settings from [[AppParams]]),
    *   3. via a plain RDD pipeline, written as JSON text files on disk.
    *
    * @param parquet    input DataFrame; must contain `provincename` and `cityname` columns
    * @param sqlContext SQLContext used to run the aggregation SQL
    */
  def accFlowDistributions(parquet: DataFrame, sqlContext: SQLContext): Unit = {
    //parquet.printSchema()
    parquet.registerTempTable("t_provence_info")
    val provenceInfo: DataFrame = sqlContext.sql(
      "select count(1) as ct,provincename,cityname from t_provence_info " +
        "group by provincename,cityname order by provincename,cityname")
    //provenceInfo.show()

    // 1. Save the aggregated result as JSON on local disk.
    //    coalesce(1) collapses the output into a single part-file.
    provenceInfo.coalesce(1).write.mode(SaveMode.Overwrite).json("provenceJson")

    // 2. Store the aggregated result into the MySQL database via JDBC.
    val prop: Properties = new Properties()
    prop.setProperty("user", AppParams.user)
    prop.setProperty("password", AppParams.password)
    prop.setProperty("driver", AppParams.driver)
    provenceInfo.write.mode(SaveMode.Overwrite).jdbc(AppParams.url, AppParams.provenceV1, prop)

    // 3. Same statistic implemented with plain RDD operators, saved to disk.
    val rddInfo: RDD[Row] = parquet.rdd
    // Look the columns up by NAME instead of the hard-coded positions 24/25
    // the original used: the SQL above already relies on these column names,
    // and name-based access survives schema reordering.
    val rddProvence: RDD[((String, String), Int)] = rddInfo.map(row => {
      ((row.getAs[String]("provincename"), row.getAs[String]("cityname")), 1)
    })
    val reduceInfo: RDD[((String, String), Int)] = rddProvence.reduceByKey(_ + _)
    val jsonRdd: RDD[String] = reduceInfo.map { case ((province, city), ct) =>
      val json: JSONObject = new JSONObject()
      json.put("ct", ct)
      json.put("provincename", province)
      json.put("cityname", city)
      json.toJSONString
    }
    // coalesce (unlike repartition) does not trigger a shuffle when shrinking partitions.
    jsonRdd.coalesce(1).saveAsTextFile("rddJsonProvence")
  }
}
