package cn.ipanel.bigdata.job.basic

import cn.ipanel.bigdata.boot.Job
import cn.ipanel.bigdata.boot.config.{ConfigKey, Configuration}
import cn.ipanel.bigdata.boot.logger.Logger
import cn.ipanel.bigdata.dw.DetailColumns
import cn.ipanel.bigdata.dw.dim.phoenix.{IotArea, T_AREA => A, T_AREA_UPDATE_TIME => AT, T_COUNTY => C, T_NEIGHBOR_HOOD => N, T_TOWN => T}
import cn.ipanel.bigdata.dw.ods.{T_REPORT => R}
import cn.ipanel.bigdata.utils.Dictionary._
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{col, _}
import org.apache.spark.sql.types.{LongType, StringType}

/**
 * @author lzz
 * @environment IntelliJ IDEA 2020.3.1
 * @projectName bigdata_panyu
 * @date 2023/11/25 10:44
 * @description Baseline area statistics.
 *        1. The full area history contains added, deleted and modified areas.
 *        2. A snapshot of the area tree is generated for each update date.
 */
class AreaDetail extends Job with IotArea with DetailColumns {

  override def onStartup(): Unit = {
    // Parse today's area change events and cache them: the frame is reused by
    // show/isEmpty plus the per-level filters below.
    val df = extractAreaChanges()
    df.persist()
    try {
      df.show(false)
      if (df.isEmpty) {
        Logger.I(_DAY.toDate + " area data is null")
      } else {
        saveChangedLevels(df)
        saveAreaTree()
      }
    } finally {
      // Release the cache once the job is done (was previously never unpersisted).
      df.unpersist()
    }
  }

  /**
   * Extracts the day's area-service reports into one change row per area code.
   *
   * Each report's extra map is unpacked into typed columns (code, name, parent,
   * level, status, times); rows where any required field failed to parse are
   * dropped; and when an area was edited several times in the day, only the
   * most recent event (by report timestamp) is kept.
   */
  private def extractAreaChanges(): DataFrame = {
    R.find(_DAY)
      .filter(col(R.F_SERVICE) === lit(Service.SERVICE_AREA))
      .withColumn("unix_time", Functions.func_mapToL(col(R.F_EXTRA), lit(Area.FIELD_STATUS_MODIFY_TIME), lit(0)))
      .withColumn(F_CODE, Functions.func_mapToL(col(R.F_EXTRA), lit(Area.FIELD_AREA_CODE), lit(INVALID_LONG)))
      .withColumn(F_NAME, Functions.func_mapToS(col(R.F_EXTRA), lit(Area.FIELD_AREA_NAME), lit(UNKNOWN)))
      .withColumn(F_PARENT_CODE, Functions.func_mapToL(col(R.F_EXTRA), lit(Area.FIELD_PARENT_CODE), lit(INVALID_LONG)))
      .withColumn(F_LEVEL, Functions.func_mapToI(col(R.F_EXTRA), lit(Area.FIELD_AREA_LEVEL), lit(INVALID_INT)))
      .withColumn(F_STATUS, Functions.func_mapToI(col(R.F_EXTRA), lit(Area.FIELD_ACTION), lit(BusState.INVALID)))
      // ADD events carry a create time; every other event carries an update time.
      .withColumn(F_UPDATE_TIME, when(col(F_STATUS) =!= lit(BusState.ADD), from_unixtime(col("unix_time"))).otherwise(null))
      .withColumn(F_CREATE_TIME, when(col(F_STATUS) === lit(BusState.ADD), from_unixtime(col("unix_time"))).otherwise(null))
      // Keep only rows where every required field parsed successfully.
      .filter(
        col(F_CODE) =!= lit(INVALID_LONG)
          and col(F_NAME) =!= lit(UNKNOWN)
          and col(F_PARENT_CODE) =!= lit(INVALID_LONG)
          and col(F_LEVEL) =!= lit(INVALID_INT)
          and col(F_STATUS) =!= lit(BusState.INVALID)
          and col("unix_time") =!= lit(0)
      )
      // Guard against repeated edits of the same area: keep the latest event only.
      .withColumn("rank_num", row_number() over Window.partitionBy(F_CODE).orderBy(col(R.F_TIMESTAMP).desc))
      .filter(col("rank_num") === lit(1))
  }

  /**
   * Merges the day's changes into the level-4 (town) and level-5 (neighborhood)
   * base tables. Level 4 must be saved before level 5.
   *
   * @param df the day's deduplicated change rows
   */
  private def saveChangedLevels(df: DataFrame): Unit = {
    val dfTown = df.filter(col(F_LEVEL) === lit(LEVEL_TOWN)).select(T.getTBColumns.head, T.getTBColumns.tail: _*)
    if (!dfTown.isEmpty) {
      T.save(
        merge(T.load.filter(col(F_STATUS) =!= lit(BusState.DEL)), dfTown)
      )
    }

    // BUGFIX: previously projected onto T.getTBColumns (copy-paste from the town
    // branch); neighborhood rows must use the neighborhood table's columns.
    val dfNeighborhood = df.filter(col(F_LEVEL) === lit(LEVEL_NEIGHBOR_HOOD)).select(N.getTBColumns.head, N.getTBColumns.tail: _*)
    if (!dfNeighborhood.isEmpty) {
      N.save(
        merge(N.load.filter(col(F_STATUS) =!= lit(BusState.DEL)), dfNeighborhood)
      )
    }
  }

  /**
   * Builds the day's county/town/neighborhood tree snapshot from the live
   * (non-deleted) base-table rows and saves it, replacing any snapshot already
   * stored for the day.
   */
  private def saveAreaTree(): Unit = {
    import spark.implicits._
    // Root county id/name come from configuration (single-county deployment).
    val f_county_id = Configuration.getParam(ConfigKey.ROOT_AREA_CODE)
    val f_county_name = Configuration.getParam(ConfigKey.ROOT_AREA_NAME)

    val allTown = T.load.filter(col(F_STATUS) =!= lit(BusState.DEL))
    val allNeighborhood = N.load.filter(col(F_STATUS) =!= lit(BusState.DEL))
    val allCounty = C.load.filter(col(F_STATUS) =!= lit(BusState.DEL))

    // One row per tree node: neighborhoods joined to their parent town,
    // then towns, then counties; missing levels are filled with -1 / null.
    val res = allTown
      .alias("a")
      .join(allNeighborhood.alias("b"), $"a.$F_CODE" === $"b.$F_PARENT_CODE")
      .selectExpr(
        s"b.$F_CODE as $F_ID",
        s"$f_county_id as $F_COUNTY_ID",
        s"'$f_county_name' as $F_COUNTY_NAME",
        s"a.$F_CODE as $F_TOWN_ID",
        s"a.$F_NAME as $F_TOWN_NAME",
        s"b.$F_CODE as $F_NEIGHBORHOOD_ID",
        s"b.$F_NAME as $F_NEIGHBORHOOD_NAME",
        s"$LEVEL_NEIGHBOR_HOOD as $F_LEVEL"
      )
      .union(
        allTown
          .selectExpr(
            s"$F_CODE as $F_ID",
            s"$f_county_id as $F_COUNTY_ID",
            s"'$f_county_name' as $F_COUNTY_NAME",
            s"$F_CODE as $F_TOWN_ID",
            s"$F_NAME as $F_TOWN_NAME",
            s"-1 as $F_NEIGHBORHOOD_ID",
            s"null as $F_NEIGHBORHOOD_NAME",
            s"$LEVEL_TOWN as $F_LEVEL"
          )
      )
      .union(
        allCounty.selectExpr(
          s"$F_CODE as $F_ID",
          s"$F_CODE as $F_COUNTY_ID",
          s"$F_NAME as $F_COUNTY_NAME",
          s"-1 as $F_TOWN_ID",
          s"null as $F_TOWN_NAME",
          s"-1 as $F_NEIGHBORHOOD_ID",
          s"null as $F_NEIGHBORHOOD_NAME",
          s"$LEVEL_COUNTY as $F_LEVEL"
        )
      )
      .withColumn(F_DATE, lit(_DAY.toDate))

    // Cache: res is consumed twice (save + count).
    res.persist()
    try {
      // Idempotent re-run: drop any snapshot already written for the day.
      A.deleteByFullDay(_DAY)
      A.save(res)
      AT.save(AT.createSingleRowDataset(_DAY, res.count()))
    } finally {
      res.unpersist()
    }
  }

  /**
   * Merges the existing base-table rows with the day's add/update/delete rows.
   *
   * Added rows are unioned in with their update time set equal to their create
   * time; modified and deleted rows (which must already exist in the base
   * table) are applied via a left join, keeping the existing value wherever the
   * change side is null.
   *
   * @param existDf     current (non-deleted) rows of the base table
   * @param addUpdateDf today's change rows (ADD / MODIFY / DEL)
   * @return the merged table content
   */
  def merge(existDf: DataFrame, addUpdateDf: DataFrame): DataFrame = {
    import spark.implicits._
    existDf.union(
      // For added areas, the update time mirrors the create time.
      addUpdateDf.filter(col(F_STATUS) === lit(BusState.ADD))
        .withColumn(T.F_UPDATE_TIME, col(T.F_CREATE_TIME))
    ).alias("a")
      .join(
        // Modified/deleted areas are guaranteed to exist in the base table, so
        // a left join is sufficient.
        addUpdateDf.filter(col(F_STATUS) =!= lit(BusState.ADD)).alias("b")
        , $"a.${F_CODE}" === $"b.${F_CODE}"
        , "left"
      )
      .selectExpr(
        s"a.$F_CODE",
        // After the left join, null b-side fields mean the row had no
        // modify/delete event today — keep the a-side value.
        s"if(b.${F_NAME} is null, a.${F_NAME}, b.${F_NAME}) as ${F_NAME}",
        s"if(b.${F_PARENT_CODE} is null, a.${F_PARENT_CODE}, b.${F_PARENT_CODE}) as ${F_PARENT_CODE}",
        // f_level must not change on edit or delete.
        s"a.$F_LEVEL as $F_LEVEL",
        s"if(b.$F_STATUS is null, a.$F_STATUS, b.$F_STATUS) as ${F_STATUS}",
        // f_create_time must not change on edit or delete.
        s"a.${F_CREATE_TIME}",
        s"if(b.${F_UPDATE_TIME} is null, a.${F_UPDATE_TIME}, b.${F_UPDATE_TIME}) as ${F_UPDATE_TIME}"
      )
  }

}