package cn.ipanel.bigdata.job.realtime

import cn.ipanel.bigdata.boot.Job
import cn.ipanel.bigdata.boot.config.{ConfigKey, Configuration}
import cn.ipanel.bigdata.boot.date.{Date, Day, Minute}
import cn.ipanel.bigdata.boot.logger.Logger
import cn.ipanel.bigdata.dw.dim.phoenix.{T_DEVICE => D}
import cn.ipanel.bigdata.dw.dim.phoenix.{T_CLASS_TAG => TAG}
import cn.ipanel.bigdata.dw.ods.T_HDFS_LOG._
import cn.ipanel.bigdata.dw.realtime.{T_ALARM => T_A, T_ALARM_DETAIL => T_A_D, T_NETWORK => T_N, T_NETWORK_DETAIL => T_N_D}
import cn.ipanel.bigdata.utils.Dictionary.{F_PERIOD_TIME, F_REGION, INVALID_INT, Network, Service}
import cn.ipanel.bigdata.utils.Util.{mapToI, mapToS, strToI, strToL, transNullAndEmpryAsString}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.{col, count, countDistinct, lit, sum, when}
import org.apache.spark.sql.types.{IntegerType, LongType}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Real-time network-quality and alarm statistics.
 *
 * Debug helpers:
 *   kafka-console-consumer --bootstrap-server master:9092,slave1:9092,slave2:9092 --topic bigdata-test
 *   select * from bigdata.t_task_stat where f_task_name = 'realtime.Realtime10'  and (f_table_name = 't_device_network' or f_table_name ='t_alarm_by_tag') order by f_start_time desc;
 *
 * @author lzz
 * @environment IntelliJ IDEA 2020.3.1
 * @projectName bigdata_shanxi
 * @date 2023/03/28 16:52
 */
class Realtime10 extends Job {

  // Minute at which the current micro-batch is processed; assigned once per
  // non-empty batch in onStartup and read by getClassTagDf/saveNet/saveAlarm.
  var now: Minute = _

  /**
   * Entry point: builds the StreamingContext, subscribes to the configured Kafka
   * topics and, for every micro-batch, keeps the valid network (SERVICE_NET) and
   * alarm (SERVICE_ALARM) records, joins them with the device/tag dimension and
   * persists the aggregates via [[saveNet]] and [[saveAlarm]].
   */
  override def onStartup(): Unit = {
    val SECONDS = Configuration.getParam(ConfigKey.REALTIME_10_INTERVAL_SECONDS, "60").toInt
    val BOOTSTRAP_SERVERS = Configuration.getParam(ConfigKey.REALTIME_BOOTSTRAP_SERVERS)
    val GROUP_ID = Configuration.getParam(ConfigKey.REALTIME_10_GROUP_ID)
    val TOPIC = Configuration.getParam(ConfigKey.REALTIME_TOPICS)
    val MASTER = Configuration.spark.master
    val JOB_NAME = this.getClass.getSimpleName

    Logger.I("seconds: " + SECONDS)
    Logger.I("bootstrapServers: " + BOOTSTRAP_SERVERS)
    Logger.I("groupId: " + GROUP_ID)
    Logger.I("topic: " + TOPIC)
    Logger.I("master: " + MASTER)
    Logger.I("job_name: " + JOB_NAME)

    // Initialise the streaming context with the configured batch interval.
    val conf = new SparkConf().setMaster(MASTER).setAppName(JOB_NAME)
    val ssc = new StreamingContext(conf, Seconds(SECONDS))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> BOOTSTRAP_SERVERS,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      // NOTE: 1. As long as group.id is unchanged, every restart resumes from the
      //          last committed offset (offsets are committed while consuming),
      //          regardless of what "auto.offset.reset" is set to.
      //       2. A new group.id starts from the latest offset by default (same as
      //          setting ("auto.offset.reset" -> "latest")); to start from the
      //          earliest offset, ("auto.offset.reset" -> "earliest") must be set.
      // This differs from resetting the committed offsets with
      //   kafka-consumer-groups --bootstrap-server master:9092 --execute --reset-offsets --to-latest --topic lzz-test --group lzz-group
      // and then re-consuming.
      "group.id" -> GROUP_ID,
      // NOTE(review): auto-commit with a direct stream commits offsets before the
      // batch output is saved (at-most-once) — confirm this is the intended
      // delivery semantics before changing.
      "enable.auto.commit" -> (true: java.lang.Boolean)
    )
    // split already yields Array[String]; no re-wrapping needed.
    val topics = TOPIC.split(",")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    import spark.implicits._
    stream.foreachRDD(rdd => {
      val dataDf = rdd
        .map(x => x.value())
        .filter(x => Table.lineIsValid(x))
        .map(Table(_))
        // All raw records of this batch.
        .toDF()
        .withColumn(F_SERVICE, col(F_SERVICE).cast(IntegerType))
        // Keep only the network and alarm service codes.
        .filter(col(F_SERVICE).isin(Service.SERVICE_NET, Service.SERVICE_ALARM))

      dataDf.persist()
      // Materialise once and reuse the count; the original count() + isEmpty
      // pair launched two Spark actions over the same cached data.
      val recordCount = dataDf.count()
      Logger.I(s"latest ${SECONDS}s record count: " + recordCount)
      if (recordCount > 0) {
        now = Date.asMinute
        // Dimension frame carrying class, tag, region and device_id.
        val deviceDf = getClassTagDf()
                          .join(
                            // Enrich the tag -> class mapping with per-device
                            // region and device id.
                            D.load
                              .selectExpr(D.F_DEVICE_ID, D.F_REGION, D.F_DEVICE_CLASS)
                              .withColumnRenamed(D.F_DEVICE_ID, F_DEVICE_ID)
                              .withColumnRenamed(D.F_REGION, F_REGION)
                            , Seq(D.F_DEVICE_CLASS)
                          )
        deviceDf.persist()

        saveNet(dataDf.filter(col(F_SERVICE) === lit(Service.SERVICE_NET)), deviceDf)
        saveAlarm(dataDf.filter(col(F_SERVICE) === lit(Service.SERVICE_ALARM)), deviceDf)

        deviceDf.unpersist()
      }

      dataDf.unpersist()
    })
    // Side-effecting calls keep their parentheses by convention.
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Loads the tag -> device-class mapping for the previous day.
   *
   * A batch running shortly after midnight may arrive before the previous day's
   * tag snapshot has been refreshed; in that case fall back to the snapshot from
   * two days ago.
   *
   * @return tag/class frame with TAG.F_CLASS renamed to D.F_DEVICE_CLASS
   */
  def getClassTagDf(): DataFrame = {
    val yesterday = loadTagSnapshot(now.asDay.prev())
    if (yesterday.isEmpty) loadTagSnapshot(now.asDay.prev(2)) else yesterday
  }

  /** Tag/class snapshot for the given day (shared by both fallback attempts). */
  private def loadTagSnapshot(day: Day): DataFrame =
    TAG.load
      .filter(col(F_DATE) === lit(day.toDate))
      .select(TAG.F_TAG, TAG.F_CLASS)
      .withColumnRenamed(TAG.F_CLASS, D.F_DEVICE_CLASS)

  /**
   * Aggregates packet-loss quality per device, merges it with the detail already
   * stored for the current day (t_network_detail) and refreshes the per-region
   * summary (t_network).
   *
   * Packet-loss rate buckets (rate taken from F_EXTRA, units as reported):
   * excellent: rate < 500; good: 500 <= rate < 3000; bad: rate >= 3000.
   *
   * @param reportDf raw batch records with f_service == SERVICE_NET
   * @param deviceDf device dimension (class, tag, region, device_id)
   */
  def saveNet(reportDf: DataFrame, deviceDf: DataFrame): Unit = {
    val F_RATE = "_rate"
    val date = now.toDate

    val aggs = Seq(sum(col(T_N_D.F_EXCELLENT_NUM)) as T_N_D.F_EXCELLENT_NUM,
      sum(col(T_N_D.F_GOOD_NUM)) as T_N_D.F_GOOD_NUM,
      sum(col(T_N_D.F_BAD_NUM)) as T_N_D.F_BAD_NUM)

    // Per-device network-quality detail for this batch.
    val df0 = reportDf
      .withColumn(F_RATE, Functions.func_mapToI(col(F_EXTRA), lit(Network.FIELD_LOST_PACKET_RATE), lit(INVALID_INT)))
      // Drop records that did not report a packet-loss rate.
      .filter(col(F_RATE) =!= INVALID_INT)
      .withColumn(T_N_D.F_EXCELLENT_NUM, when(col(F_RATE) < lit(500), lit(1)).otherwise(lit(0)))
      .withColumn(T_N_D.F_GOOD_NUM, when(col(F_RATE) >= lit(500) and col(F_RATE) < lit(3000), lit(1)).otherwise(lit(0)))
      .withColumn(T_N_D.F_BAD_NUM, when(col(F_RATE) >= lit(3000), lit(1)).otherwise(lit(0)))
      .join(
        deviceDf, Seq(F_DEVICE_ID)
      )
      .groupBy(T_N_D.F_DATE, T_N_D.F_REGION, T_N_D.F_DEVICE_ID)
      .agg(
        aggs.head, aggs.tail: _*
      )
      .withColumn(T_N_D.F_DATE, lit(date))

    if (df0.isEmpty) {
      Logger.I("no package lose data report now")
    } else {
      // Merge this batch with the detail already persisted for the same day.
      val df1 = df0.select(T_N_D.getTBColumns.head, T_N_D.getTBColumns.tail: _*)
        .union(T_N_D.load.filter(col(F_DATE) === lit(date)))
        .groupBy(T_N_D.F_DATE, T_N_D.F_REGION, T_N_D.F_DEVICE_ID)
        .agg(
          aggs.head, aggs.tail: _*
        )
      df1.persist()
      T_N_D.save(df1)

      // Per-region summary derived from the merged detail.
      T_N.save(
        df1.groupBy(T_N_D.F_DATE, F_REGION)
          .agg(
            countDistinct(F_DEVICE_ID) as T_N.F_DEVICE_COUNT,
            sum(col(T_N_D.F_EXCELLENT_NUM)) as T_N.F_EXCELLENT_NUM,
            sum(col(T_N_D.F_GOOD_NUM)) as T_N.F_GOOD_NUM,
            sum(col(T_N_D.F_BAD_NUM)) as T_N.F_BAD_NUM
          )
          .withColumn(T_N.F_PERIOD_TIME, lit(date))
      )

      df1.unpersist()

      saveJobStat(0, realtimeTaskRefresh = true, tableName = T_N.getTBName)
    }
  }

  /**
   * Counts alarms per device/tag for this batch, merges them with the detail
   * already stored for the current day (t_alarm_detail) and refreshes the
   * per-region/tag summary (t_alarm).
   *
   * @param reportDf raw batch records with f_service == SERVICE_ALARM
   * @param deviceDf device dimension (class, tag, region, device_id)
   */
  def saveAlarm(reportDf: DataFrame, deviceDf: DataFrame): Unit = {
    val date = now.toDate
    // Per-device alarm counts for this batch.
    val df0 = reportDf
                .join(
                  deviceDf, Seq(F_DEVICE_ID)
                ).groupBy(F_REGION, F_DEVICE_ID, TAG.F_TAG)
                .agg(
                  // Alarm occurrences per device for the day.
                  count(F_DEVICE_ID) as T_A_D.F_ALARM_COUNT
                )
                .withColumn(T_A_D.F_DATE, lit(date))

    if (df0.isEmpty) {
      Logger.I("no alarm data report now")
    } else {
      // Merge this batch with the detail already persisted for the same day.
      val df1 = df0.select(T_A_D.getTBColumns.head, T_A_D.getTBColumns.tail: _*)
        .union(T_A_D.load.filter(col(T_A_D.F_DATE) === lit(date)))
        .groupBy(T_A_D.F_DATE, T_A_D.F_REGION, T_A_D.F_DEVICE_ID, T_A_D.F_TAG)
        .agg(
          sum(col(T_A_D.F_ALARM_COUNT)) as T_A_D.F_ALARM_COUNT
        )
      df1.persist()
      T_A_D.save(df1)

      // Per-region/tag summary derived from the merged detail.
      T_A.save(
        df1.groupBy(T_A_D.F_REGION, T_A_D.F_TAG)
          .agg(
            countDistinct(F_DEVICE_ID) as T_A.F_DEVICE_COUNT,
            sum(col(T_A_D.F_ALARM_COUNT)) as T_A.F_ALARM_COUNT
          )
          .withColumn(T_A.F_PERIOD_TIME, lit(date))
      )

      df1.unpersist()

      // Named argument for consistency with saveNet.
      saveJobStat(0, realtimeTaskRefresh = true, tableName = T_A.getTBName)
    }
  }

}