package com.alison.sparkstream.action

import com.alibaba.druid.pool.DruidDataSourceFactory
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.text.SimpleDateFormat
import java.time.format.DateTimeFormatter
import java.time.{Instant, ZoneId}
import java.util.{Date, Properties}
import javax.sql.DataSource

object E3_agg {

  /*
   * Real-time aggregation of ad-click counts.
   *
   * 1. Requirement
   *   (1) Continuously count, per day, the total ad clicks for every
   *       (area, city, ad) combination and persist the totals to MySQL.
   *   (2) Implementation steps:
   *       - aggregate each micro-batch by day/area/city/ad;
   *       - merge the batch result into the existing MySQL row (upsert).
   *
   * 2. Preparation — target table DDL:
   *   CREATE TABLE area_city_ad_count (
   *       dt VARCHAR(20),
   *       area VARCHAR(20),
   *       city VARCHAR(20),
   *       adid VARCHAR(20),
   *       count BIGINT,
   *       PRIMARY KEY (dt,area,city,adid)
   *   ) engine = innodb default charset=utf8;
   *
   * (Was a bare string expression — a discarded value with no runtime effect;
   * converted to a comment so the compiler no longer evaluates and drops it.)
   */

  // Entry point — intentionally empty in this teaching example; the pipeline
  // pieces below (MyKafkaUtil, JdbcUtil, DateAreaCityAdCountHandler) are meant
  // to be wired together here. NOTE(review): nothing runs as-is — confirm
  // whether the driver setup was omitted on purpose.
  def main(args: Array[String]): Unit = {

  }

  object MyKafkaUtil {
    // Consumer configuration shared by every stream created through this helper.
    val kafkaParam = Map(
      "bootstrap.servers" -> "localhost:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      // Consumer group this application joins.
      "group.id" -> "commerce-consumer-group",
      // Used when no committed offset exists (or the stored one is invalid
      // on the broker): "latest" resets to the newest available offset.
      "auto.offset.reset" -> "latest",
      // true  -> offsets are committed automatically in the background
      //          (simple, but records can be lost if Kafka goes down);
      // false -> offsets must be tracked and committed manually.
      "enable.auto.commit" -> (true: java.lang.Boolean)
    )

    /**
     * Builds a direct Kafka input stream for a single topic.
     *
     * LocationStrategies.PreferConsistent spreads partitions evenly across
     * all executors; ConsumerStrategies.Subscribe subscribes the consumer
     * (configured by [[kafkaParam]]) to the given topic.
     *
     * @param topic Kafka topic to subscribe to
     * @param ssc   active streaming context
     * @return      the direct input DStream of consumer records
     */
    def getKafkaStream(topic: String, ssc: StreamingContext):
    InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParam)
      )
  }

  object JdbcUtil {
    // Druid connection pool, initialized once per JVM.
    var dataSource: DataSource = init()

    /** Builds the Druid pool from hard-coded MySQL connection properties. */
    def init(): DataSource = {
      val properties = new Properties()
      properties.setProperty("driverClassName", "com.mysql.cj.jdbc.Driver")
      properties.setProperty("url", "jdbc:mysql://localhost:3306/spark")
      properties.setProperty("username", "root")
      properties.setProperty("password", "root")
      DruidDataSourceFactory.createDataSource(properties)
    }

    /** Borrows a connection from the pool; caller must close it to return it. */
    def getConnection: Connection = {
      dataSource.getConnection
    }

    // Binds params(0..n-1) to placeholders 1..n of the prepared statement.
    private def bindParams(pstmt: PreparedStatement, params: Array[Any]): Unit = {
      if (params != null && params.length > 0) {
        for (i <- params.indices) {
          pstmt.setObject(i + 1, params(i))
        }
      }
    }

    // Closes a statement, ignoring close-time failures (best effort).
    private def closeQuietly(pstmt: PreparedStatement): Unit = {
      if (pstmt != null) {
        try pstmt.close() catch { case _: Exception => () }
      }
    }

    /**
     * Executes a single INSERT/UPDATE/DELETE inside a transaction.
     *
     * Errors are logged and swallowed (best-effort, as before), but the
     * transaction is now rolled back, the statement is closed, and the
     * connection's auto-commit flag is restored even on failure — previously
     * a failed call leaked the statement and returned the pooled connection
     * with auto-commit still disabled.
     *
     * @return number of affected rows, or 0 on failure
     */
    def executeUpdate(connection: Connection, sql: String, params: Array[Any]): Int
    = {
      var rtn = 0
      var pstmt: PreparedStatement = null
      val previousAutoCommit = connection.getAutoCommit
      try {
        connection.setAutoCommit(false)
        pstmt = connection.prepareStatement(sql)
        bindParams(pstmt, params)
        rtn = pstmt.executeUpdate()
        connection.commit()
      } catch {
        case e: Exception =>
          try connection.rollback() catch { case _: Exception => () }
          e.printStackTrace()
      } finally {
        closeQuietly(pstmt)
        // Restore the flag so the pooled connection is reusable as borrowed.
        try connection.setAutoCommit(previousAutoCommit) catch { case _: Exception => () }
      }
      rtn
    }

    /**
     * Executes the same statement once per parameter row as a single batch
     * transaction. Same cleanup guarantees as [[executeUpdate]].
     *
     * @return per-row update counts, or null on failure (preserved contract)
     */
    def executeBatchUpdate(connection: Connection, sql: String, paramsList:
    Iterable[Array[Any]]): Array[Int] = {
      var rtn: Array[Int] = null
      var pstmt: PreparedStatement = null
      val previousAutoCommit = connection.getAutoCommit
      try {
        connection.setAutoCommit(false)
        pstmt = connection.prepareStatement(sql)
        for (params <- paramsList) {
          if (params != null && params.length > 0) {
            bindParams(pstmt, params)
            pstmt.addBatch()
          }
        }
        rtn = pstmt.executeBatch()
        connection.commit()
      } catch {
        case e: Exception =>
          try connection.rollback() catch { case _: Exception => () }
          e.printStackTrace()
      } finally {
        closeQuietly(pstmt)
        try connection.setAutoCommit(previousAutoCommit) catch { case _: Exception => () }
      }
      rtn
    }

    /**
     * Returns true if the query yields at least one row.
     * Statement is closed even when the query throws (was leaked before).
     */
    def isExist(connection: Connection, sql: String, params: Array[Any]): Boolean = {
      var flag: Boolean = false
      var pstmt: PreparedStatement = null
      try {
        pstmt = connection.prepareStatement(sql)
        bindParams(pstmt, params)
        flag = pstmt.executeQuery().next()
      } catch {
        case e: Exception => e.printStackTrace()
      } finally {
        closeQuietly(pstmt)
      }
      flag
    }

    /**
     * Returns the first column of the LAST row of the result as a Long
     * (0L when no rows / on failure — preserved behavior). Result set and
     * statement are now released even when iteration throws.
     */
    def getDataFromMysql(connection: Connection, sql: String, params: Array[Any]):
    Long = {
      var result: Long = 0L
      var pstmt: PreparedStatement = null
      var resultSet: ResultSet = null
      try {
        pstmt = connection.prepareStatement(sql)
        bindParams(pstmt, params)
        resultSet = pstmt.executeQuery()
        while (resultSet.next()) {
          result = resultSet.getLong(1)
        }
      } catch {
        case e: Exception => e.printStackTrace()
      } finally {
        if (resultSet != null) {
          try resultSet.close() catch { case _: Exception => () }
        }
        closeQuietly(pstmt)
      }
      result
    }

  }

  /*
   * DateAreaCityAdCountHandler: counts per-day ad clicks for each area/city
   * and upserts the totals into MySQL.
   * (Was a bare string expression — a discarded value; converted to a comment.)
   */

  object DateAreaCityAdCountHandler {
    // Day formatter for epoch-millis timestamps. DateTimeFormatter is
    // immutable and thread-safe; the SimpleDateFormat it replaces was shared
    // by concurrent task threads inside executor map closures, which can
    // silently corrupt formatted dates.
    private val dayFormatter: DateTimeFormatter =
      DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneId.systemDefault())

    /**
     * 统计每天各大区各个城市广告点击总数并保存至 MySQL 中
     * Counts per-day clicks per (area, city, ad) in each batch and upserts
     * them into area_city_ad_count.
     *
     * @param filterAdsLogDStream 根据黑名单过滤后的数据集 (blacklist-filtered clicks)
     */
    def saveDateAreaCityAdCountToMysql(filterAdsLogDStream: DStream[Ads_log]): Unit = {
      //1.统计每天各大区各个城市广告点击总数 — key each click by day/area/city/ad, count per batch
      val dateAreaCityAdToCount: DStream[((String, String, String, String), Long)] =
        filterAdsLogDStream.map(ads_log => {
          // Format the event's epoch-millis timestamp as the day key.
          // NOTE(review): uses the JVM default zone, matching the original
          // SimpleDateFormat behavior — confirm intended timezone.
          val dt: String = dayFormatter.format(Instant.ofEpochMilli(ads_log.timestamp))
          ((dt, ads_log.area, ads_log.city, ads_log.adid), 1L)
        }).reduceByKey(_ + _)

      //2.将单个批次统计之后的数据集合 MySQL 数据对原有的数据更新 — merge batch counts into MySQL
      dateAreaCityAdToCount.foreachRDD(rdd => {
        // One connection per partition, not per record.
        rdd.foreachPartition(iter => {
          val connection: Connection = JdbcUtil.getConnection
          try {
            // Upsert: insert a new row, or add the batch count to an existing one.
            iter.foreach { case ((dt, area, city, adid), count) =>
              JdbcUtil.executeUpdate(connection,
                """
                  |INSERT INTO area_city_ad_count (dt,area,city,adid,count)
                  |VALUES(?,?,?,?,?)
                  |ON DUPLICATE KEY
                  |UPDATE count=count+?;
              """.stripMargin,
                Array(dt, area, city, adid, count, count))
            }
          } finally {
            // Return the connection to the pool even if a write fails
            // (previously leaked on exception).
            connection.close()
          }
        })
      })
    }
  }

  // One ad-click event: epoch-millis timestamp plus area/city/user/ad ids.
  // NOTE(review): name violates Scala UpperCamelCase (AdsLog), but renaming
  // would break external callers, so it is kept as-is.
  case class Ads_log(timestamp: Long, area: String, city: String, userid: String, adid: String)

}
