package com.atguigu.realtime.streaming.apps

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter

import com.alibaba.fastjson.JSON
import com.atguigu.realtime.constants.TopicConstant
import com.atguigu.realtime.streaming.apps.StartLogApp.{appName, batchDuration, context}
import com.atguigu.realtime.streaming.beans.OrderInfo
import com.atguigu.realtime.streaming.utils.{JDBCUtil, MyKafkaUtil}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Created by Smexy on 2022/5/27
 *
 *    Computes the total order amount (GMV) per day and per hour.
 *        This is a stateful (accumulating) computation.
 *
 *    Output strategy: transactional output (data and offsets written atomically).
 *    Idempotent output is not an option here, since the aggregate is accumulated
 *    across batches.
 *
 *    -------------
 *    A table must be created in MySQL to hold the GMV data.
 */
object GMVApp extends BaseApp {
  override var appName: String = "GMVApp"
  override var batchDuration: Int = 10
  override var groupId: String = "realtime1227"
  override var topic: String = TopicConstant.ORDER_INFO

  /**
   * Queries the previously committed offsets for the given consumer group and
   * topic from the `offsets` table in MySQL.
   *
   * @param groupId consumer group whose committed offsets to read
   * @param topic   topic whose partition offsets to read
   * @return a map of TopicPartition -> last committed untilOffset; empty when
   *         nothing was committed yet (or when the query fails — errors are
   *         only logged, so a fresh start falls back to the default offsets)
   */
  def readHistoryOffsets(groupId:String,topic:String):Map[TopicPartition,Long]={

    val offsetsMap = new mutable.HashMap[TopicPartition, Long]()

    val sql=
      """
        |
        |select
        |   *
        |from offsets
        |where groupId=? and topic=?
        |
        |""".stripMargin

    var conn:Connection = null
    var ps: PreparedStatement = null
    var resultSet: ResultSet = null

    try {
      conn = JDBCUtil.getConnection()
      ps = conn.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)

      resultSet = ps.executeQuery()
      while (resultSet.next()) {
        // Offset committed by this group for each partition of the topic
        // during the previous run.
        offsetsMap.put(new TopicPartition(topic, resultSet.getInt("partitionId")), resultSet.getLong("untiloffset"))
      }
    } catch {
      case e:Exception => e.printStackTrace()
    } finally {
      // Close in reverse order of acquisition.
      // FIX: the ResultSet was previously never closed explicitly.
      if (resultSet != null) {
        try resultSet.close() catch { case e: Exception => e.printStackTrace() }
      }
      if (ps != null) {
        try ps.close() catch { case e: Exception => e.printStackTrace() }
      }
      if (conn != null) {
        conn.close()
      }
    }

    offsetsMap.toMap
  }

  /**
   * Parses each Kafka record's JSON value into an [[OrderInfo]] bean and
   * derives `create_date` (yyyy-MM-dd) and `create_hour` (HH) from the
   * bean's `create_time` field (expected format: "yyyy-MM-dd HH:mm:ss").
   *
   * Uses mapPartitions so the (thread-safe) DateTimeFormatter instances are
   * created once per partition instead of once per record.
   *
   * @param rdd raw Kafka consumer records (value is an OrderInfo JSON string)
   * @return an RDD of parsed OrderInfo beans with date/hour fields populated
   */
  def parseBean(rdd: RDD[ConsumerRecord[String, String]]):RDD[OrderInfo] = {

    rdd.mapPartitions(partition => {

      // Declared once per partition; java.time formatters are immutable and
      // thread-safe, unlike the legacy SimpleDateFormat.
      val dateFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd")
      val hourFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("HH")
      val timestampFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")

      partition.map(record => {

        val orderInfo: OrderInfo = JSON.parseObject(record.value(), classOf[OrderInfo])

        // e.g. create_time = "2022-05-25 02:23:00"
        // Populate create_date and create_hour from create_time.
        val time: LocalDateTime = LocalDateTime.parse(orderInfo.create_time, timestampFormatter)
        orderInfo.create_date = time.format(dateFormatter)
        orderInfo.create_hour = time.format(hourFormatter)

        orderInfo

      })

    })

  }

  /**
   * Writes the batch's GMV aggregates and the consumed Kafka offsets to MySQL
   * in a single transaction, giving exactly-once output semantics: either both
   * the data and the offsets commit, or neither does.
   *
   * @param result       ((date, hour), gmv) aggregates computed for this batch
   * @param offsetRanges offsets consumed by this batch, to be committed with it
   */
  def writeDataAndOffsetsInCommonBatch(result: Array[((String, String), Double)], offsetRanges: Array[OffsetRange]):Unit = {

    // Accumulating (stateful) upsert: gmv for an existing (date, hour) key is
    // incremented rather than overwritten.
    val sql1=
      """
        |
        |INSERT INTO gmvstats VALUES(?,?,?)
        |ON DUPLICATE KEY UPDATE gmv=gmv + values(gmv)
        |
        |
        |
        |""".stripMargin

    // REPLACE overwrites the (groupId, topic, partition) row with the new offset.
    val sql2=
      """
        |replace into offsets values(?,?,?,?)
        |
        |
        |
        |""".stripMargin

    var conn:Connection = null
    var ps1: PreparedStatement = null
    var ps2: PreparedStatement = null

    try {
      conn = JDBCUtil.getConnection()

      // Disable auto-commit: data and offsets must commit atomically.
      conn.setAutoCommit(false)

      ps1 = conn.prepareStatement(sql1)
      ps2 = conn.prepareStatement(sql2)

      // Build the data INSERT batch.
      for (((date, hour), gmv) <- result) {

        // One row per (date, hour) aggregate of the current streaming batch.
        ps1.setString(1,date)
        ps1.setString(2,hour)
        ps1.setDouble(3,gmv)

        // Accumulate into a JDBC batch and send to MySQL in one round trip.
        ps1.addBatch()

      }

      // Build the offset INSERT batch.
      for (offsetRange <- offsetRanges) {

        ps2.setString(1,groupId)
        ps2.setString(2,topic)
        ps2.setInt(3,offsetRange.partition)
        ps2.setLong(4,offsetRange.untilOffset)

        ps2.addBatch()

      }

      // Execute both batches, then commit them as one transaction.
      val dataReponse: Array[Int] = ps1.executeBatch()
      val offsetReponse: Array[Int] = ps2.executeBatch()

      conn.commit()

      println("数据写成功:"+dataReponse.size)
      println("偏移量写成功:"+offsetReponse.size)

    } catch {
      case e:Exception =>{
        // FIX: only roll back when a connection was actually obtained.
        // Previously, a failure inside getConnection() left conn == null and
        // this line threw a NullPointerException that masked the real error.
        if (conn != null) {
          try conn.rollback() catch { case re: Exception => re.printStackTrace() }
        }
        e.printStackTrace()
      }
    } finally {

      if(ps1 != null){
        try ps1.close() catch { case e: Exception => e.printStackTrace() }
      }

      if(ps2 != null){
        try ps2.close() catch { case e: Exception => e.printStackTrace() }
      }

      if(conn != null){
        // Restore the driver default before releasing the connection —
        // NOTE(review): matters if JDBCUtil pools connections, since a pooled
        // connection would otherwise be reused with autoCommit=false; confirm
        // against JDBCUtil's implementation.
        try conn.setAutoCommit(true) catch { case e: Exception => e.printStackTrace() }
        conn.close()
      }

    }

  }

  def main(args: Array[String]): Unit = {

    context = new StreamingContext("local[*]", appName, Seconds(batchDuration))

    // Read the offsets committed by the previous run so consumption resumes
    // exactly where the last transaction left off.
    val offsetMap: Map[TopicPartition, Long] = readHistoryOffsets(groupId, topic)

    runApp{

      // Obtain a stream starting from the previously committed positions.
      val ds: InputDStream[ConsumerRecord[String, String]] = MyKafkaUtil.getKafkaStream(Array(topic), context, groupId, true, offsetMap)

      ds.foreachRDD(rdd => {

        if (!rdd.isEmpty()){

          // Capture the offset ranges of this batch (must be read from the
          // original KafkaRDD, before any transformation).
          val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          // Parse JSON values into OrderInfo beans.
          val rdd1: RDD[OrderInfo] = parseBean(rdd)

          // Aggregate GMV per (date, hour) and collect to the driver; the
          // per-batch result is small (at most days x 24 keys).
          val result: Array[((String, String), Double)] = rdd1.map(orderInfo => ((orderInfo.create_date, orderInfo.create_hour), orderInfo.total_amount))
            .reduceByKey(_ + _)
            .collect()

          // Write the result and the offsets in one transaction.
          writeDataAndOffsetsInCommonBatch(result,ranges)

        }

      })

    }

  }
}
