package sparkStreaming.taxi_hailingCount

import java.util.Properties

import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

/**
 * Kafka → Spark Streaming consumer for taxi-hailing order data:
 * persists raw orders and a running per-address/per-day aggregate to MySQL.
 *
 * @author lixinlei
 * @since 2021/6/3 8:57
 */

object KafkaStreamingConsumer {

  // Expected number of comma-separated fields in each Kafka record:
  // phone, address, time, distance, price
  private val FieldCount = 5

  // MySQL connection properties, built once here so every micro-batch
  // write reuses them instead of re-creating them per batch.
  val prop: Properties = new Properties()
  prop.put(Common.MYSQL_USER, Common.MYSQL_USERNAME_VALUE)
  prop.put(Common.MYSQL_PASSWORD, Common.MYSQL_PASSWORD_VALUE)

  /**
   * Parses one raw Kafka record into its fields.
   *
   * Returns None for lines with too few fields so that a single malformed
   * message cannot kill the whole streaming job with an
   * ArrayIndexOutOfBoundsException (the original code indexed blindly).
   *
   * @param line one comma-separated order record
   * @return Some(fields) when at least FieldCount fields are present, else None
   */
  private def parseLine(line: String): Option[Array[String]] = {
    val fields = line.split(",")
    if (fields.length >= FieldCount) Some(fields) else None
  }

  /**
   * Wires up the Kafka consumer pipeline:
   *  1. appends every raw order into the `didi` MySQL table, and
   *  2. maintains a running per-address/per-day aggregate (order count,
   *     total distance, total price) via updateStateByKey, snapshotting
   *     the full state into the `didicount` table each batch (Overwrite,
   *     since the state already contains the complete totals).
   *
   * @param ssc streaming context; its checkpoint directory must already be
   *            set by the caller (required by updateStateByKey)
   */
  def kafkaConsumer(ssc: StreamingContext): Unit = {

    // Kafka consumer configuration.
    val kafkaPara = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> Common.BOOTSTRAP,
      ConsumerConfig.GROUP_ID_CONFIG -> Common.KAFKA_GROUP_ORACLE,
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> Common.DESERIALIZER_CLASS,
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> Common.DESERIALIZER_CLASS
    )

    // Direct stream from Kafka; the single DStream feeds both branches below.
    val kafkaData = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set(Common.KAFKA_TOPIC_ORACLE), kafkaPara)
    )

    // Parse each record exactly once (previously both branches re-split the
    // same line); malformed records are dropped instead of crashing the job.
    val parsed = kafkaData.flatMap(record => parseLine(record.value()))

    // Branch 1: write every raw order into the didi table, adding a
    // generated UUID as a synthetic primary key.
    parsed.map(f => (f(0), f(1), f(2), f(3), f(4))).foreachRDD { rdd =>
      val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
      import spark.implicits._

      val didiDF = rdd.toDF("phone", "address", "time", "distance", "price")
      didiDF.createOrReplaceTempView("didi")

      val resDF = spark.sql("select UUID() as id,* from didi")
      resDF.show()
      resDF.write.mode(SaveMode.Append).jdbc(
        Common.MYSQL_URL_VALUE_SPARK,
        Common.MYSQL_TABLE_DIDI,
        prop
      )
    }

    // Branch 2: stateful per-address/per-day totals of distance, price and
    // order count.
    parsed.map { f =>
      // Key is "address#yyyy-MM-dd" (date taken from the timestamp field);
      // value is (distance, price, 1 order).
      // NOTE(review): assumes distance/price are integer strings — a
      // non-numeric value would still fail here; confirm upstream format.
      val day = f(2).split(" ")(0)
      (f(1) + "#" + day, (f(3).toInt, f(4).toInt, 1))
    }.updateStateByKey[(Int, Int, Int)] { (values, state) =>
      // Fold this batch's deltas into the running totals.
      val total = values.foldLeft(state.getOrElse((0, 0, 0))) { (acc, v) =>
        (acc._1 + v._1, acc._2 + v._2, acc._3 + v._3)
      }
      Some(total)
    }.map { case (key, (distance, price, count)) =>
      // Unpack "address#date" back into separate columns for the DataFrame.
      val parts = key.split("#")
      (parts(0), parts(1), distance, price, count)
    }.foreachRDD { rdd =>
      val spark = SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()
      import spark.implicits._

      val lineDF = rdd.toDF("address", "dateTime", "distanceCount", "priceCount", "count")
      lineDF.createOrReplaceTempView("didicount")

      val resDF = spark.sql("select address,dateTime,sum(distanceCount) as distanceCount," +
        " sum(priceCount) as priceCount,sum(count) as count from didicount group by dateTime,address")

      // Overwrite: the stateful stream always carries the complete totals,
      // so each batch replaces the snapshot table wholesale.
      resDF.write.mode(SaveMode.Overwrite).jdbc(
        Common.MYSQL_URL_VALUE_SPARK,
        Common.MYSQL_TABLE_DIDICOUNT,
        prop
      )
    }
  }

  /**
   * Entry point: builds the local streaming context, enables checkpointing
   * (required by the stateful aggregation) and blocks until termination.
   */
  def main(args: Array[String]): Unit = {

    val ssc = Common.getStreamingContext("local[*]", "app")
    // updateStateByKey needs a checkpoint directory to persist state.
    ssc.checkpoint(Common.FOREACHRDD_CHECKPOINT_DIR)

    kafkaConsumer(ssc)

    ssc.start()
    ssc.awaitTermination()
  }

}
