package doit20.datayi.utils

import com.alibaba.fastjson.JSON
import org.apache.spark.sql.{SaveMode, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-04-15
 * @desc Device/account binding-score table update job.
 *       Score table DDL:
 *       create table doit20dws.device_account_bind(
 *       device_id     string,
 *       account       string,
 *       score         float,
 *       last_access   bigint
 *       )
 *       partitioned by (dt string)
 *       stored as parquet
 *       ;
 *
 *       Logic:
 *      1. Load day-T logs and aggregate per session: (deviceid, uid, session_cnt)
 *      2. Split the aggregate into two groups by whether uid is null
 *      3. Rows with uid != null: full-join the T-1 score table on (deviceid, uid),
 *         adding points for active bindings and decaying the rest
 *      4. Rows with uid == null: left-join the T-1 score table on (deviceid) and
 *         insert only the devices not seen before
 *
 */
object DeviceAccountBindTableUpdate {

  /**
   * Entry point. Expects two arguments:
   *   args(0) — computation date (day T, partition to read/write)
   *   args(1) — previous date (day T-1, partition of the existing score table)
   */
  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message when the two required dates are missing.
    if (args.length < 2) {
      System.err.println(
        """
          |
          |usage:
          |  args(0): 计算日期
          |  args(1): 前一日期
          |
          |""".stripMargin)
      sys.exit(1)
    }

    val dt = args(0)     // day T    (computation date)
    val dtPre = args(1)  // day T-1  (previous score-table partition)

    val spark = SparkSession.builder()
      .appName("设备绑定评分表更新")
      //.config("spark.sql.shuffle.partitions", "1")
      // Required for the dynamic-partition insert into the dt partition below.
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      //.master("local")
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._ // for .toDF on the tuple RDD below


    // 1. Load day-T logs and aggregate per session: (deviceid, uid, session_cnt)
    val logdata = spark.read.textFile(s"/user/hive/warehouse/doit20ods.db/app_event_log/dt=${dt}")
    val logTable = logdata.rdd.map(json => {
      // NOTE(review): assumes every line is well-formed JSON with these keys;
      // a malformed line or missing "timeStamp" would fail the task — confirm upstream guarantees.
      val jSONObject = JSON.parseObject(json)
      val deviceId = jSONObject.getString("deviceId")
      val account = jSONObject.getString("account")
      val sessionId = jSONObject.getString("sessionId")
      val timeStamp = jSONObject.getLong("timeStamp")

      (deviceId, account, sessionId, timeStamp)
    }).toDF("deviceid", "account", "sessionid", "timestamp")

    //logTable.show(50)


    logTable.createTempView("log")

    // One row per (device, account): number of distinct sessions and latest event time.
    val sessionAggr = spark.sql(
      """
        |
        |select
        |  deviceid,
        |  account,
        |  count(distinct sessionid) as session_cnt,
        |  max(timestamp) as last_access
        |from log
        |group by deviceid,account
        |
        |""".stripMargin)

    // sessionAggr.show(20,false)

    // 2. Split the aggregate into two groups by whether the account is present.
    // Cached because both branches below read it.
    sessionAggr.cache()

    val haveAccount = sessionAggr.where("account !='' and account is not null")
    haveAccount.createTempView("haveAccount")

    val noAccount = sessionAggr.where("account ='' or account is null")
    noAccount.createTempView("noAccount")

    // 3. Rows with an account: full-join the T-1 score table on (deviceid, account).
    // Cached because it is joined by both result queries.
    val bindTable = spark.read.table("doit20dws.device_account_bind").where(s"dt='${dtPre}'")
    bindTable.createTempView("bindTable")
    bindTable.cache()

    // Scoring rules:
    //   seen today AND yesterday  -> previous score + 100 per session (reinforce binding)
    //   seen today only           -> 100 per session (new binding)
    //   seen yesterday only       -> previous score * 0.95 (decay inactive binding)
    val res1 = spark.sql(
      s"""
        |
        |select
        |  nvl(a.deviceid,b.device_id) as device_id,
        |  nvl(a.account,b.account) as account,
        |  case
        |    when a.deviceid is not null and b.device_id is not null then b.score + a.session_cnt*100
        |    when a.deviceid is not null and b.device_id is null then a.session_cnt*100
        |    when a.deviceid is null  and b.device_id is not null then b.score * 0.95
        |  end as score,
        |  nvl(a.last_access,b.last_access) as last_access,
        |  '${dt}' as dt
        |from
        |  haveAccount a  -- 当日数据
        |full join
        |  bindTable b    -- 前日评分记录
        |on a.deviceid = b.device_id and a.account = b.account
        |
        |""".stripMargin)

    //res1.show(50, false)


    // 4. Rows without an account: left-join the T-1 score table on (deviceid);
    //    keep only devices absent from the score table (anti-join via "b.device_id is null").
    // TODO Per the design, devices with no account binding at all arguably
    //      should not enter the binding-score table in the first place.
    val res2 = spark.sql(
      s"""
        |
        |select
        | a.deviceid as device_id,
        | a.account as account,
        | b.score as score,
        | b.last_access as last_access,
        | '${dt}' as dt
        |from noAccount a left join bindTable b on a.deviceid=b.device_id
        |where b.device_id is null
        |
        |""".stripMargin)

    // 5. Union both parts and append to Hive as the day-T partition.
    // union (position-based) replaces the deprecated unionAll; identical semantics.
    res1.union(res2).write.mode(SaveMode.Append).format("hive").partitionBy("dt").saveAsTable("doit20dws.device_account_bind")

    spark.close()
  }


}
