package com.doit.dophin.etl

import org.apache.spark.sql.SparkSession

/**
 * @DATE 2022/3/21/15:51
 * @Author MDK
 * @Version 2021.2.2
 *
 *    app端日志的session分割处理
 *    创建临时表:保存 会话切割 之后的结果
 create table tmp.mall_applog_session_split(
    account              string
    ,app_id              string
    ,app_version         string
    ,carrier             string
    ,device_id           string
    ,device_type         string
    ,event_id            string
    ,ip                  string
    ,latitude            double
    ,longitude           double
    ,net_type            string
    ,os_name             string
    ,os_version          string
    ,properties          map<string,string>
    ,release_channel     string
    ,resolution          string
    ,session_id          string
    ,ts                  bigint
    ,new_session_id      string
)
partitioned by (dt string)
stored as orc
tblproperties('orc.compress'='snappy');
 *
 *
 * */
object _2_ApplogSessionSplit {

  /**
   * Entry point. Expects exactly one argument: the partition date to process
   * (format: yyyy-MM-dd, e.g. 2022-03-24).
   *
   * Reads tmp.mall_applog_washed for that date, splits each device session
   * into sub-sessions wherever two consecutive events are more than 30 minutes
   * apart, and writes the result into the corresponding partition of
   * tmp.mall_applog_session_split.
   */
  def main(args: Array[String]): Unit = {
    // Require exactly one argument: the date of the partition to process.
    if(args.length != 1){
      println(
        """
          |usage: 请至少输入一个指定日期的参数
          |  参数一:待处理的日期,如2022-03-24
          |""".stripMargin)
      sys.exit(1)
    }

    val dt:String = args(0)

    // FIX: the HDFS/Hive user must be set BEFORE the SparkSession is built.
    // With enableHiveSupport(), getOrCreate() initializes the Hadoop
    // FileSystem/UGI and the metastore connection immediately, so setting
    // HADOOP_USER_NAME afterwards (as the original code did) has no effect
    // on those already-opened connections.
    System.setProperty("HADOOP_USER_NAME", "root")

    val spark = SparkSession.builder()
      .appName("APP端日志数据,会话切割")
      // NOTE(review): hard-coded local master; remove (or make configurable)
      // when submitting to a real cluster via spark-submit.
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    // Session splitting:
    //   inner query : flag = 1 when the gap to the previous event in the same
    //                 session_id exceeds 30 minutes (30*60*1000 ms), else 0;
    //                 lag(ts,1,ts) defaults to the row's own ts for the first
    //                 event, so the first row always gets flag = 0.
    //   outer query : a running sum of flag numbers the sub-sessions (0,1,2,…)
    //                 and is appended to session_id to form new_session_id.
    // FIX: use INSERT OVERWRITE (instead of INSERT INTO) so that re-running
    // the job for the same dt is idempotent and does not duplicate rows in
    // the target partition.
    spark.sql(
      s"""
        |insert overwrite table tmp.mall_applog_session_split partition(dt='${dt}')
        |select
        |   account
        |   ,app_id
        |   ,app_version
        |   ,carrier
        |   ,device_id
        |   ,device_type
        |   ,event_id
        |   ,ip
        |   ,latitude
        |   ,longitude
        |   ,net_type
        |   ,os_name
        |   ,os_version
        |   ,properties
        |   ,release_channel
        |   ,resolution
        |   ,session_id
        |   ,ts
        |   -- 上一行
        |   ,concat_ws('-',session_id,sum(flag) over(partition by session_id order by ts)) as new_session_id
        |from
        |(
        |   select
        |     *,
        |     if(ts-lag(ts,1,ts) over(partition by session_id order by ts) > 30*60*1000, 1, 0) as flag
        |   from tmp.mall_applog_washed
        |   where dt='${dt}'
        |)o
        |""".stripMargin)

    spark.close()
  }

}
