package cn.ipanel.bigdata.job.basic

import cn.ipanel.bigdata.boot.Job
import cn.ipanel.bigdata.boot.logger.Logger
import cn.ipanel.bigdata.dw.dim.hive.{T_DEVICE_DAILY_STATUS => S}
import cn.ipanel.bigdata.dw.dim.phoenix.{T_DEVICE => D}
import cn.ipanel.bigdata.dw.realtime.{T_ONLINE_DEVICE_CT => T}
import cn.ipanel.bigdata.utils.Dictionary.DeviceStat._
import cn.ipanel.bigdata.utils.Dictionary.{F_DEVICE_ID, F_PERIOD_TIME, F_REGION, F_STATUS}
import org.apache.spark.sql.functions.{col, countDistinct, lit, when}


/**
 * @author lzz
 * @environment IntelliJ IDEA 2020.3.1
 * @projectName bigdata_panyu
 * @date 2023/11/25 10:44
 * @description Per-day online-device statistics. At the last second of each day every
 *              device currently reported ONLINE counts as online for that day. A
 *              snapshot is written for every day so the next day's run can build on
 *              the previous one (guards against reports that straddle midnight).
 */
class OnlineDeviceByDay extends Job {

  /** Delete any rows previously written for the target day so re-runs are idempotent. */
  override def onPrepare(): Unit = {
    Logger.I("date: " + _DAY.toDate)
    T.deleteByPeriodTime(_DAY.toSimpleDate)
  }

  /**
   * Overlays the day's ONLINE reports onto the full set of valid devices (which all
   * default to OFFLINE), then counts distinct online/offline devices per region and
   * persists the result keyed by the day's period time.
   */
  override def onStartup(): Unit = {
    import spark.implicits._

    // Devices that reported ONLINE in the daily status snapshot.
    val onlineReports = S.load
      .filter(col(F_STATUS) === lit(ONLINE))
      .select(F_DEVICE_ID, F_STATUS)
      .alias("a")

    // Right join keeps every valid device for the period (defaulted to OFFLINE)
    // and overlays the ONLINE status where a report exists.
    val res = onlineReports
      .join(
        D.loadPeriodValidDevice(_DAY)
          .select(D.F_DEVICE_ID, D.F_REGION)
          .withColumn(F_STATUS, lit(OFFLINE))
          .alias("b")
        , $"a.${F_DEVICE_ID}" === $"b.${F_DEVICE_ID}"
        , "right"
      )
      .selectExpr(
        s"b.$F_DEVICE_ID",
        s"b.$F_REGION",
        // coalesce: take the reported (ONLINE) status when present, else the OFFLINE default.
        s"coalesce(a.$F_STATUS, b.$F_STATUS) as $F_STATUS"
      )
      .groupBy(F_REGION)
      .agg(
        // countDistinct already ignores nulls, so an unmatched `when` branch
        // (implicit null) is simply not counted — no explicit otherwise(null) needed.
        countDistinct(when(col(F_STATUS) === lit(ONLINE), col(F_DEVICE_ID))) as T.F_ONLINE_NUM,
        countDistinct(when(col(F_STATUS) === lit(OFFLINE), col(F_DEVICE_ID))) as T.F_OFFLINE_NUM
      )
      .withColumn(F_PERIOD_TIME, lit(_DAY.toSimpleDate))

    // Cached because the frame is consumed twice (show + save).
    res.persist()
    res.show(false)
    try {
      T.save(res)
    } finally {
      // Release executor cache once the result has been written (was leaked before).
      res.unpersist()
    }
  }

}
