package com.atguigu.sparkstreaming.apps

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.alibaba.fastjson.JSON
import com.atguigu.realtime.constants.{PrefixConstant, TopicConstant}
import com.atguigu.realtime.utils.RedisUtil
import com.atguigu.sparkstreaming.beans.{OrderInfo, StartLog}
import com.atguigu.sparkstreaming.utils.{DStreamUtil, DateParseUtil, JDBCUtil}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

import scala.collection.mutable

/**
 *   Aggregation jobs accumulate values across batches, so the writes are NOT idempotent.
 *
 *   Exactly-once semantics are therefore achieved with: at-least-once delivery + a transaction.
 *
 *   -----------------------
 *   Table design:
 *       (statdate, stathour, gmv)
 *
 *       Granularity: one row per hour of each day
 *       Primary key: (statdate, stathour)
 *
 *   --------------------------
 *   To consume from the LATEST position of order_info: set "auto.offset.reset" -> "latest".
 *   To consume from the EARLIEST position of order_info: initialize the MySQL `offsets` table yourself:
 *       realtime220309,REALTIME_DB_ORDER_INFO,0,0
 *       realtime220309,REALTIME_DB_ORDER_INFO,1,0
 *       realtime220309,REALTIME_DB_ORDER_INFO,2,0
 *
 *
 */
object GAVApp extends BaseApp {
  override var groupId: String = "realtime220309"
  override var topic: String = TopicConstant.ORDER_INFO
  override var appName: String = "GAVApp"
  override var batchDuration: Int = 10

  /**
   * Reads the last committed offsets for (groupId, topic) from the MySQL `offsets` table.
   *
   * @param groupId consumer group whose offsets were stored
   * @param topic   topic whose offsets were stored
   * @return one (TopicPartition -> offset) entry per row found; empty map when no rows exist
   * @throws RuntimeException if the query fails (original exception attached as cause)
   */
  def selectOffsetsFromMysql(groupId: String, topic: String): Map[TopicPartition, Long] = {

    val sql =
      """
        |select
        |   partitionId,offset
        |from offsets
        |where groupId=? and topic=?
        |""".stripMargin

    val offsets = new mutable.HashMap[TopicPartition, Long]()

    var connection: Connection = null
    var ps: PreparedStatement = null

    try {
      connection = JDBCUtil.getConnection()

      ps = connection.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)

      val resultSet: ResultSet = ps.executeQuery()
      while (resultSet.next()) {
        offsets.put(
          new TopicPartition(topic, resultSet.getInt("partitionId")),
          resultSet.getLong("offset"))
      }
    } catch {
      case e: Exception =>
        e.printStackTrace()
        // FIX: attach the cause so the real failure survives the rethrow.
        throw new RuntimeException("查询偏移量失败!", e)
    } finally {
      // The ResultSet is closed implicitly when its PreparedStatement is closed.
      if (ps != null) ps.close()
      if (connection != null) connection.close()
    }

    // Hand back an immutable snapshot of the mutable accumulator.
    offsets.toMap
  }

  /**
   * Deserializes each Kafka record's JSON value into an [[OrderInfo]] bean and
   * derives its create_date / create_hour fields from create_time.
   */
  def parseBean(rdd: RDD[ConsumerRecord[String, String]]): RDD[OrderInfo] = {
    rdd.map { record =>
      val orderInfo: OrderInfo = JSON.parseObject(record.value(), classOf[OrderInfo])
      orderInfo.create_date = DateParseUtil.parseDateTimeStrToDate(orderInfo.create_time)
      orderInfo.create_hour = DateParseUtil.parseDateTimeStrToHour(orderInfo.create_time)
      orderInfo
    }
  }

  /**
   * Writes the aggregated GMV rows and the consumed offset ranges in ONE MySQL
   * transaction, so results and offsets commit (or roll back) together — this is
   * what upgrades the at-least-once stream to effectively-exactly-once output.
   *
   * NOTE(review): the method name keeps the historical "wirte" typo so external
   * callers are not broken.
   *
   * @param result ((statdate, stathour), gmv) aggregates for the current batch
   * @param ranges Kafka offset ranges consumed by the current batch
   * @throws RuntimeException if either write fails; the transaction is rolled back
   */
  def wirteResultAndOffsetsInCommonTransaction(result: Array[((String, String), Double)], ranges: Array[OffsetRange]): Unit = {

    // Accumulate GMV: upsert keyed on the (statdate, stathour) primary key.
    val sql1 =
      """
        |INSERT INTO `gmvstats` VALUES(?,?,?)
        |ON DUPLICATE KEY UPDATE gmv=gmv + VALUES(gmv)
        |""".stripMargin

    // Upsert the committed offsets, one row per (groupId, topic, partition).
    val sql2 =
      """
        |INSERT INTO `offsets` VALUES(?,?,?,?)
        |ON DUPLICATE KEY UPDATE OFFSET=VALUES(OFFSET)
        |""".stripMargin

    var connection: Connection = null
    var ps1: PreparedStatement = null
    var ps2: PreparedStatement = null

    try {
      connection = JDBCUtil.getConnection()

      // Disable auto-commit so both statements share a single transaction.
      connection.setAutoCommit(false)

      ps1 = connection.prepareStatement(sql1)
      ps2 = connection.prepareStatement(sql2)

      for (((date, hour), gmv) <- result) {
        ps1.setString(1, date)
        ps1.setString(2, hour)
        ps1.setDouble(3, gmv)
        ps1.addBatch()
      }

      for (offsetRange <- ranges) {
        ps2.setString(1, groupId)
        ps2.setString(2, topic)
        ps2.setInt(3, offsetRange.partition)
        // untilOffset is the NEXT offset to consume, so the next run resumes there.
        ps2.setLong(4, offsetRange.untilOffset)
        ps2.addBatch()
      }

      val res1: Array[Int] = ps1.executeBatch()
      val res2: Array[Int] = ps2.executeBatch()

      // Commit results and offsets atomically.
      connection.commit()

      println("数据写入:" + res1.size)
      println("偏移量写入:" + res2.size)

    } catch {
      case e: Exception =>
        // BUGFIX: getConnection() itself may have thrown, leaving `connection`
        // null — an unguarded rollback() would NPE and mask the real failure.
        if (connection != null) connection.rollback()
        e.printStackTrace()
        // FIX: attach the cause so the real failure survives the rethrow.
        throw new RuntimeException("写入失败!", e)
    } finally {
      if (ps1 != null) ps1.close()
      if (ps2 != null) ps2.close()
      if (connection != null) {
        // Best-effort restore of the default commit mode in case JDBCUtil
        // hands this connection back to a pool instead of really closing it.
        try connection.setAutoCommit(true) catch { case _: Exception => () }
        connection.close()
      }
    }
  }

  /**
   * Entry point: builds the StreamingContext, resumes the Kafka stream from the
   * offsets stored in MySQL, aggregates GMV per (day, hour) for each batch, and
   * commits each batch's results together with its offsets in one transaction.
   */
  def main(args: Array[String]): Unit = {

    // Rebuild the context declared in BaseApp with this app's settings.
    context = new StreamingContext("local[*]", appName, Seconds(batchDuration))

    runApp {

      // Offsets committed by the previous run.
      val offsetMap: Map[TopicPartition, Long] = selectOffsetsFromMysql(groupId, topic)

      // Stream that consumes forward from exactly those offsets.
      val ds: InputDStream[ConsumerRecord[String, String]] = DStreamUtil.createDStream(groupId, context, topic, true, offsetMap)

      ds.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {

          // Offset ranges this batch consumed (needed for the transactional commit).
          val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          // JSON records -> OrderInfo beans with date/hour derived.
          val rdd1: RDD[OrderInfo] = parseBean(rdd)

          // Total order amount per (day, hour) within this batch.
          val result: Array[((String, String), Double)] = rdd1
            .map(orderInfo => ((orderInfo.create_date, orderInfo.create_hour), orderInfo.total_amount))
            .reduceByKey(_ + _)
            .collect()

          // Write aggregates and offsets in one MySQL transaction.
          wirteResultAndOffsetsInCommonTransaction(result, ranges)
        }
      }
    }
  }
}
