package com.atguigu.realtime.apps

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.alibaba.fastjson.JSON
import com.atguigu.realtime.beans.OrderInfo
import com.atguigu.realtime.constants.TopicConstant
import com.atguigu.realtime.utils.{DStreamUtil, DateTimeUtil, JDBCUtil}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Created by Smexy on 2022/8/26
 *
 * Saves aggregated results to MySQL (an aggregation-style app). Data and
 * Kafka offsets are written in a single transaction to achieve exactly-once
 * output semantics.
 */
object GMVApp extends BaseApp {

  override var batchDuration: Int = 10
  override var appName: String = "GMVApp"
  override var groupId: String = "220409realtime"
  override var topic: String = TopicConstant.ORDER_INFO

  /**
   * Reads the offsets committed by a previous run for the given consumer
   * group and topic from the `offsets` table in MySQL.
   *
   * @param groupId Kafka consumer group id
   * @param topic   Kafka topic name
   * @return map of TopicPartition -> last committed untilOffset; empty when
   *         nothing was stored yet (or the query failed — see note below)
   */
  def readHistoryOffsetsFromMysql(groupId:String,topic:String):mutable.Map[TopicPartition, Long]={

    // Result accumulator.
    val offsets = new mutable.HashMap[TopicPartition, Long]()

    // Plain JDBC read of the last committed offsets.
    val sql =
      """
        |
        |select
        |   partitionId,untilOffset
        |from offsets
        |where groupId = ? and topic = ?
        |
        |""".stripMargin

    var connection:Connection = null
    var ps: PreparedStatement = null
    var resultSet: ResultSet = null

    try {
      connection = JDBCUtil.getConnection()
      ps = connection.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)
      resultSet = ps.executeQuery()

      while (resultSet.next()) {
        offsets.put(
          new TopicPartition(topic, resultSet.getInt("partitionId")),
          resultSet.getLong("untilOffset"))
      }
    } catch {
      case e:Exception =>
        // NOTE(review): swallowing this failure silently falls back to the
        // consumer's default starting offsets, which can re-process data and
        // break exactly-once. Consider rethrowing so the app fails fast.
        e.printStackTrace()
    } finally {
      // Close resources in reverse order of acquisition; the ResultSet is
      // closed explicitly rather than relying on Statement.close().
      if (resultSet != null){
        resultSet.close()
      }

      if (ps != null){
        ps.close()
      }

      if (connection != null){
        connection.close()
      }

    }

    offsets

  }

  /**
   * Collects the distributed results on the driver and writes them together
   * with the consumed offsets in ONE MySQL transaction (exactly-once output).
   *
   * Aggregates are upserted: an existing (date, hour) row has the new gmv
   * added via ON DUPLICATE KEY UPDATE; offsets are overwritten via REPLACE.
   *
   * @param data    driver-side results: ((date, hour), gmv) per key
   * @param offsets offset ranges consumed by the current batch
   */
  def writeDataAndOffsetsInATransction(data: Array[((String, String), Double)], offsets: Array[OffsetRange]): Unit = {

    // Stateful accumulation: VALUES(gmv) is the current batch's value, which
    // is added onto the gmv already stored for that (date, hour) key.
    val sql1=
      """
        |
        |INSERT INTO `gmvstats` VALUES(?,?,?)
        |ON DUPLICATE KEY UPDATE gmv = gmv + VALUES(gmv)
        |
        |
        |""".stripMargin

    val sql2=
      """
        |
        |REPLACE INTO `offsets` VALUES(?,?,?,?)
        |
        |""".stripMargin


    var connection:Connection = null
    var ps1: PreparedStatement = null
    var ps2: PreparedStatement = null

    try {
      connection = JDBCUtil.getConnection()

      // Transaction: disable auto-commit; commit manually once both batches
      // have been sent successfully.
      connection.setAutoCommit(false)

      ps1 = connection.prepareStatement(sql1)
      ps2 = connection.prepareStatement(sql2)

      // Fill placeholders: each iteration queues one upsert statement into
      // the batch instead of sending it to MySQL immediately.
      for (((date, hour), gmv) <- data) {
        ps1.setString(1,date)
        ps1.setString(2,hour)
        ps1.setDouble(3,gmv)

        ps1.addBatch()
      }

      // One OffsetRange per partition consumed in this micro-batch.
      for (offsetRange <- offsets) {

        ps2.setString(1,groupId)
        ps2.setString(2,topic)
        ps2.setInt(3,offsetRange.partition)
        // untilOffset is the position after the last consumed record — the
        // point the next run must resume from.
        ps2.setLong(4,offsetRange.untilOffset)

        ps2.addBatch()

      }

      // Flush both batches to MySQL, then commit atomically.
      val dataSuccess: Array[Int] = ps1.executeBatch()
      val offsetsSuccess: Array[Int] = ps2.executeBatch()

      // Transaction: commit.
      connection.commit()

      println("数据写成功了:" + dataSuccess.size)
      println("offsets写成功了:" + offsetsSuccess.size)


    } catch {
      case e:Exception =>
        // Transaction: roll back on any failure. Guard against the NPE that
        // occurred when getConnection() itself threw (connection == null).
        if (connection != null){
          connection.rollback()
        }
        e.printStackTrace()
    } finally {
      if (ps1 != null){
        ps1.close()
      }

      if (ps2 != null){
        ps2.close()
      }

      if (connection != null){
        connection.close()
      }

    }

  }


  /**
   * Parses each Kafka record value (a JSON string) into an OrderInfo bean and
   * derives create_date / create_hour from the bean's create_time field.
   */
  def parseBean(rdd: RDD[ConsumerRecord[String, String]]):RDD[OrderInfo] ={

    rdd.map(record => {

      val orderInfo: OrderInfo = JSON.parseObject(record.value(), classOf[OrderInfo])
      orderInfo.create_date = DateTimeUtil.parseTimeStrToDate(orderInfo.create_time)
      orderInfo.create_hour = DateTimeUtil.parseTimeStrToHour(orderInfo.create_time)

      orderInfo

    })

  }

  def main(args: Array[String]): Unit = {

    // Override the StreamingContext declared in BaseApp.
    context = new StreamingContext("local[*]",appName,Seconds(batchDuration))

    // Resume from the offsets committed to MySQL by the previous run.
    val offsets: mutable.Map[TopicPartition, Long] = readHistoryOffsetsFromMysql(groupId, topic)

    runSparkStreamingApp{

      val ds: InputDStream[ConsumerRecord[String, String]] = DStreamUtil.getDStream(context, groupId, topic,true,offsets)

      ds.foreachRDD(rdd => {

        // Only run the job when the batch actually contains data.
        if (!rdd.isEmpty()){

          // Offset ranges consumed by this batch.
          val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          // Business logic: GMV aggregated per (date, hour).
          val rdd1: RDD[OrderInfo] = parseBean(rdd)

          val data: Array[((String, String), Double)] = rdd1.map(o => ((o.create_date, o.create_hour), o.total_amount))
            .reduceByKey(_ + _)
            .collect()

          // Collect results on the driver and write them with the offsets in
          // one MySQL transaction.
         writeDataAndOffsetsInATransction(data,ranges)

          // Also commit offsets to Kafka — informational only, since the next
          // run reads its starting offsets from MySQL.
          ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
        }

      })

    }

  }
}
