package cn.doitedu.dwetl

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-01-12
 * @desc Device & account association score computation
 *
 *       -- Target table DDL
 *       CREATE TABLE dwd.device_account_relation
 *       (
 *       deviceid   STRING,
 *       account    STRING,
 *       score      DOUBLE,
 *       first_time BIGINT,
 *       last_time  BIGINT
 *       )
 *       PARTITIONED BY (dt STRING)
 *       STORED AS PARQUET
 *       TBLPROPERTIES('parquet.compress','snappy')
 *       ;
 *
 *
 *       -- Computation strategy
 *       Derive the T-day "association score table" from the T-1 day
 *       "association score table" and the T-day behavior log.
 *
 *
 *
 *       -- Detailed strategy
 *       First count the T-day sessions to get each (device, account) pair's
 *       score for the day; then FULL JOIN that result against the T-1 day
 *       table and pick values case by case.
 *
 *
 *       -- Result-quality check query
 *       select
 *       deviceid
 *       from dwd.device_account_relation
 *       group by deviceid
 *       having count(1)>count(account) and count(1)>1
 *       -- If the total row count exceeds the account count and is > 1, the same
 *       -- device has both rows with an account and rows without one.
 *       +-----------+
 *       | deviceid  |
 *       +-----------+
 *       +-----------+
 *
 *       -- Cardinality check
 *       Compare the distinct deviceid cardinality of the ods source table with
 *       that of the device/account association score table:
 *       select  count(distinct deviceid) from ods.event_app_log where dt='2021-01-10'
 *       union all
 *       select  count(distinct deviceid) from dwd.device_account_relation where dt='2021-01-10'
 *
 */
object DeviceAccountRelationScore {

  /**
   * Entry point.
   *
   * @param args args(0) = T-1 date (dt partition of the existing relation table),
   *             args(1) = T date   (dt partition of the day's event log; also the output partition)
   */
  def main(args: Array[String]): Unit = {

    if(args.length<2){
      println(
        """
          |
          |wrong number of parameters!
          |usage:
          | args(0): T-1 date
          | args(1): T date
          |
          |""".stripMargin)
      sys.exit(1)
    }


    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession.builder()
      //.master("local[*]")
      .config("spark.sql.shuffle.partitions","10")
      .appName("设备账号关联得分计算")
      .enableHiveSupport()
      .getOrCreate()

    // Load the T-1 day snapshot of the device/account association scores
    val relation = spark.read.table("dwd.device_account_relation").where(s"dt='${args(0)}'")
    // Load the T-day event log
    val log = spark.read.table("ods.event_app_log").where(s"dt='${args(1)}'")

    // relation.show(10,false)
    // log.show(10,false)

    /**
     * Step 1: aggregate the T-day log into per (device, account) scores.
     * Score = 100 points per distinct session; `time` = earliest event of the day.
     *
     * NOTE (fix): we group by the *normalized* account expression (blank -> null),
     * not the raw column. Grouping by the raw column would let a device whose log
     * contains both account='' and account=NULL emit two separate anonymous rows,
     * violating the "at most one account-less row per device" invariant that the
     * final full-join dedup step relies on.
     */
    log.createTempView("log_detail")
    val logAggr = spark.sql(
      """
        |select
        |deviceid,
        |if(trim(account)='',null,account) as account,
        |cast(count(distinct sessionid)*100 as double) as score,
        |min(timestamp)  as time
        |
        |from log_detail
        |group by deviceid,if(trim(account)='',null,account)
        |
        |
        |""".stripMargin)


    relation.createTempView("re")
    logAggr.createTempView("log")

    // Step 2: FULL JOIN today's aggregated scores against the T-1 day table
    // (the full historical state). No value selection is done in this step.
    val joined = spark.sql(
      """
        |
        |
        |select
        |  re.deviceid as re_deviceid,
        |  re.account as re_account,
        |  cast(re.score as double) as re_score,
        |  re.first_time as re_firsttime,
        |  re.last_time as re_lasttime,
        |  log.deviceid as log_deviceid,
        |  log.account as log_account,
        |  cast(log.score as double) as log_score,
        |  log.time as time
        |
        |from re full join log
        |on re.deviceid=log.deviceid and re.account=log.account
        |
        |
        |""".stripMargin)

    joined.createTempView("joined")

    /**
     * +-----------+----------+--------+------------+-----------+------------+-----------+---------+----+
     * |re_deviceid|re_account|re_score|re_firsttime|re_lasttime|log_deviceid|log_account|log_score|time|
     * +-----------+----------+--------+------------+-----------+------------+-----------+---------+----+
     * |d1         |c1        |200     |1           |10         |d1          |c1         |200      |11  |
     * |null       |null      |null    |null        |null       |d1          |c5         |100      |11  |
     * |null       |null      |null    |null        |null       |d6          |c6         |200      |11  |
     * |d3         |null      |null    |null        |null       |null        |null       |null     |null|
     * |null       |null      |null    |null        |null       |d5          |c5         |200      |11  |
     * |d2         |c2        |800     |2           |8          |null        |null       |null     |null|
     * |d2         |c3        |600     |3           |7          |d2          |c3         |100      |11  |
     * |d4         |c4        |200     |1           |10         |null        |null       |null     |null|
     * |null       |null      |null    |null        |null       |d7          |null       |null     |null|
     * |d5         |null      |null    |null        |null       |null        |null       |null     |null|
     * +-----------+----------+--------+------------+-----------+------------+-----------+---------+----+
     */

    /**
     * Step 3: pick values per join case:
     *  - pair seen only in history (no log row today): decay the score by 0.7
     *  - otherwise: today's score is added onto the historical score (0 if new)
     *  - first_time keeps the historical value when present; last_time takes
     *    today's timestamp when present
     */
    val res = spark.sql(
      """
        |
        |select
        |  nvl(re_deviceid,log_deviceid) as deviceid,
        |  nvl(re_account,log_account) as account,
        |  if(re_deviceid is not null and log_deviceid is null,re_score*0.7,nvl(re_score,0)+log_score)  score,
        |  nvl(re_firsttime,time)  first_time,
        |  nvl(time,re_lasttime) as last_time
        |
        |from joined
        |
        |""".stripMargin)



    /**
     * +--------+-------+-----+----------+---------+
     * |deviceid|account|score|first_time|last_time|
     * +--------+-------+-----+----------+---------+
     * |d1      |c1     |400.0|1         |11       |
     * |d1      |c5     |100.0|11        |11       |
     * |d6      |c6     |200.0|11        |11       |
     * |d3      |null   |null |null      |null     |
     * |d5      |c5     |200.0|11        |11       |
     * |d2      |c2     |560.0|2         |8        |
     * |d2      |c3     |700.0|3         |11       |
     * |d4      |c4     |140.0|1         |10       |
     * |d7      |null   |null |null      |null     |
     * |d5      |null   |null |null      |null     |
     * +--------+-------+-----+----------+---------+
     *
     */


    /**
     * Step 4: drop the anonymous (account IS NULL) rows of devices that already
     * have an associated account.
     * Logic: per device there is at most one anonymous row, and joining the
     * anonymous rows with the account-bearing rows collapses them onto one row.
     *
     * NOTE (fix): use INSERT OVERWRITE rather than INSERT INTO so that rerunning
     * the job for the same date replaces the partition instead of duplicating it.
     */
    res.where("account is null").createTempView("t_null")
    res.where("account is not null").createTempView("t_you")
    spark.sql(
      s"""
        |
        |insert overwrite table dwd.device_account_relation partition(dt='${args(1)}')
        |
        |select
        |  nvl(t_you.deviceid,t_null.deviceid) as deviceid,
        |  t_you.account as account,
        |  t_you.score as score,
        |  nvl(t_you.first_time,t_null.first_time) as first_time,
        |  nvl(t_you.last_time,t_null.last_time) as last_time
        |from t_null full join t_you on t_null.deviceid=t_you.deviceid
        |
        |""".stripMargin)

    /**
     * +--------+-------+-----+----------+---------+
     * |deviceid|account|score|first_time|last_time|
     * +--------+-------+-----+----------+---------+
     * |d2      |c2     |560.0|2         |8        |
     * |d2      |c3     |700.0|3         |11       |
     * |d3      |null   |null |null      |null     |
     * |d1      |c1     |400.0|1         |11       |
     * |d1      |c5     |100.0|11        |11       |
     * |d4      |c4     |140.0|1         |10       |
     * |d6      |c6     |200.0|11        |11       |
     * |d7      |null   |null |null      |null     |
     * |d5      |c5     |200.0|11        |11       |
     * +--------+-------+-----+----------+---------+
     */
    spark.close()
  }

}
