package cn.ipanel.bigdata.job.basic

import cn.ipanel.bigdata.boot.Job
import cn.ipanel.bigdata.boot.logger.Logger
import cn.ipanel.bigdata.dw.dim.phoenix.{T_DEVICE => D}
import cn.ipanel.bigdata.dw.ods.{T_REPORT => R}
import cn.ipanel.bigdata.utils.Dictionary.{Device, INVALID_INT, INVALID_LONG, Service, BusState}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{col, from_unixtime, lit, row_number, when}

/**
 * @author lzz
 * @environment IntelliJ IDEA 2020.3.1
 * @projectName bigdata_panyu
 * @date 2023/11/25 10:44
 * @description: 底量设备表，包含了新增，删除，修改的设备 (base device dimension table, covering added, deleted, and modified devices)
 */
class DeviceDetail extends Job {

  /**
   * Daily merge of device lifecycle events (add / edit / delete) from the
   * ODS report table into the Phoenix device dimension (base) table.
   *
   * Steps:
   *   1. Extract today's device-service events, normalize them to the
   *      dimension schema, and keep only the latest event per device.
   *   2. Union the still-active base rows with newly added devices.
   *   3. Left-join edit/delete events to overwrite status / region / class
   *      and update time, while the create time is never modified.
   */
  override def onStartup(): Unit = {
    val df = R.find(_DAY)
      .filter(col(R.F_SERVICE) === lit(Service.SERVICE_DEVICE))
      // epoch seconds of the status change, read from the extras map
      .withColumn("unix_time", Functions.func_mapToL(col(R.F_EXTRA), lit(Device.FIELD_STATUS_MODIFY_TIME), lit(0)))
      .withColumn(D.F_STATUS, Functions.func_mapToI(col(R.F_EXTRA), lit(Device.FIELD_ACTION), lit(BusState.INVALID)))
      .withColumnRenamed(R.F_DEVICE_ID, D.F_DEVICE_ID)
      .withColumn(D.F_REGION, Functions.func_mapToL(col(R.F_EXTRA), lit(Device.FIELD_AREA_CODE), lit(INVALID_LONG)))
      // drop rows whose action or area code failed to parse
      .filter(col(D.F_STATUS) =!= lit(BusState.INVALID) and col(D.F_REGION) =!= lit(INVALID_LONG))
      .withColumn(D.F_DEVICE_CLASS, Functions.func_mapToS(col(R.F_EXTRA), lit(Device.FIELD_CLASS), lit(null)))
      // ADD events carry the create time; edit/delete events carry the update time
      .withColumn(D.F_UPDATE_TIME, when(col(D.F_STATUS) =!= lit(BusState.ADD), from_unixtime(col("unix_time"))).otherwise(null))
      .withColumn(D.F_CREATE_TIME, when(col(D.F_STATUS) === lit(BusState.ADD), from_unixtime(col("unix_time"))).otherwise(null))
      // a device may be edited several times in one day; keep only the most
      // recent event per device id
      .withColumn("rank_num", row_number() over Window.partitionBy(D.F_DEVICE_ID).orderBy(col(R.F_TIMESTAMP).desc))
      .filter(col("rank_num") === lit(1))
      .select(D.getTBColumns.head, D.getTBColumns.tail: _*)

    df.persist()
    // Reuse this count for the emptiness check below instead of triggering a
    // second Spark action (df.isEmpty) on the same plan.
    val reportCount = df.count()
    Logger.I(s"new device from report count: $reportCount")
    Logger.I("new device from report details: ")
    df.show(false)

    // Guard clause (avoids Scala's discouraged `return`): if nothing was
    // reported today, leave the dimension table untouched.
    if (reportCount > 0) {
      import spark.implicits._
      // rows already marked deleted in the base table are kept untouched
      val df0 = D.load.filter(col(D.F_STATUS) =!= lit(BusState.DEL))
        .select(D.getTBColumns.head, D.getTBColumns.tail: _*)
      df0.persist()
      df0.show(5, false)

      val res = df0
        .union(
          // for newly added devices, make both timestamps identical
          df.filter(col(D.F_STATUS) === lit(BusState.ADD))
            .withColumn(D.F_UPDATE_TIME, col(D.F_CREATE_TIME))
        )
        .alias("a")
        .join(
          // edited or deleted devices must already exist in the base table,
          // so a left join is sufficient
          df.filter(col(D.F_STATUS) =!= lit(BusState.ADD)).alias("b")
          ,$"a.${D.F_DEVICE_ID}" === $"b.${D.F_DEVICE_ID}"
          ,"left"
        )
        .selectExpr(
          // after the left join, a null b-side column means the device was
          // not edited/deleted today -> keep the a-side value
          s"if(b.${D.F_REGION} is null, a.${D.F_REGION}, b.${D.F_REGION}) as ${D.F_REGION}",
          s"if(b.${D.F_STATUS} is null, a.${D.F_STATUS}, b.${D.F_STATUS}) as ${D.F_STATUS}",
          s"a.${D.F_DEVICE_ID}",
          s"if(b.${D.F_DEVICE_CLASS} is null, a.${D.F_DEVICE_CLASS},b.${D.F_DEVICE_CLASS}) as ${D.F_DEVICE_CLASS}",
          // f_create_time must never change on edit or delete
          s"a.${D.F_CREATE_TIME}",
          s"if(b.${D.F_UPDATE_TIME} is null, a.${D.F_UPDATE_TIME},b.${D.F_UPDATE_TIME}) as ${D.F_UPDATE_TIME}"
        )
//      res.persist()
//      res.filter(col(D.F_DEVICE_ID).isin("test_1", "test_2", "test_3", "007d76b9705544a2892b152f999dd05a", "0a357b7536614c73aae8a550c8157d3f"))
//        .show(false)
//      res.printSchema()
      D.save(res)
      // release cached blocks now that the output has been materialized
      df0.unpersist()
    }
    df.unpersist()
  }

}