package cn.doitedu.dw_etl

import cn.doitedu.dw_beans.ApplogBean
import cn.doitedu.dw_utils.Row2Bean
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.{DateFormatUtils, DateUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

import java.text.SimpleDateFormat
import java.util.{Date, TimeZone, UUID}

/**
 * @author 涛哥
 * @nick_name "the love you deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-12-13
 * @desc app行为日志ods层表到dwd层表的etl之预处理
 *       清洗过滤
 *       格式转换（json->parquet）
 *       数据规范化
 */
object ApplogOds2DwdPreprocess {
  /**
   * Entry point. Expects one argument: the partition date (yyyy-MM-dd) to process.
   *
   * Pipeline: read the ODS partition -> filter invalid rows (null keys /
   * out-of-range timestamps) -> convert rows to beans -> normalize fields ->
   * split sessions on 30-minute gaps -> write parquet to a temp HDFS path.
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with usage info when the partition date argument is missing.
    if (args.length < 1) {
      println(
        """
          |
          |params not enough!
          |usage:
          |  args(0): 要计算的分区日期
          |
          |""".stripMargin)
      sys.exit(1)
    }

    val dt = args(0)
    // parseDateStrictly (instead of lenient parseDate) rejects malformed dates
    // such as "2021-13-40" that the lenient parser would silently roll over;
    // this also guards the raw interpolation of `dt` into the SQL below.
    val dtNext: String = DateFormatUtils.format(DateUtils.addDays(DateUtils.parseDateStrictly(dt, "yyyy-MM-dd"), 1), "yyyy-MM-dd")

    val spark: SparkSession = SparkSession.builder()
      .appName("app行为日志ods层表到dwd层表的etl之预处理")
      //.master("local")
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._

    // Read the single requested ODS partition; the partition column `dt` is
    // constant for this run, so drop it before further processing.
    val odsTable: Dataset[Row] = spark.read.table("ods.mall_app_log_dtl").where(s"dt='${dt}'").drop("dt")

    // Register a temp view so the cleanup filter can be expressed in SQL.
    odsTable.createTempView("dtl")

    // Render an epoch-millis value as a GMT+8 wall-clock timestamp string,
    // used to keep only events that fall inside the [dt, dtNext) day window.
    val toTimestamp = (ts: Long) => {
      DateFormatUtils.format(ts, "yyyy-MM-dd HH:mm:ss.SSS", TimeZone.getTimeZone("GMT+8"))
    }
    spark.udf.register("toTimestamp", toTimestamp)

    /**
     * Cleaning: drop rows missing any of the key fields
     * (deviceid / properties / eventid / sessionid) or whose event time lies
     * outside the requested partition day.
     */
    val cleaned = spark.sql(
      s"""
        |
        |select
        | *
        |from dtl
        |-- deviceid/properties/eventid/sessionid
        |where
        |  deviceid is not null and
        |  properties is not null and
        |  eventid is not null and
        |  sessionid is not null and
        |--from_utc_timestamp(cast(`timestamp` as timestamp),'GMT+8') >= '2021-12-07 00:00:00.000'   and   from_utc_timestamp(cast(`timestamp` as timestamp),'GMT+8') < '2021-12-08 00:00:00.000'
        |  toTimestamp(`timestamp`) >= '${dt} 00:00:00.000'   and   toTimestamp(`timestamp`) < '${dtNext} 00:00:00.000'
        |""".stripMargin)


    /**
     * Normalization: convert rows to beans (discarding rows the converter
     * rejects) and canonicalize blank fields.
     */
    val beanRdd: RDD[ApplogBean] = cleaned.rdd
      .map(Row2Bean.applogRow2Bean)
      .filter(_.isDefined)
      .map(_.get)

    // A blank (whitespace-only or empty) account is normalized to null so
    // downstream logic has a single representation for "no account".
    val regulated: RDD[ApplogBean] = beanRdd.map(bean => {
      if (StringUtils.isBlank(bean.account)) bean.account = null
      bean
    })


    /**
     * Session splitting: within each original session (ordered by timestamp),
     * start a fresh session id whenever the gap to the next event exceeds
     * 30 minutes.
     */
    val sessionSplited = regulated
      .groupBy(_.sessionid) // group by the original session id
      .flatMap(tp => { // process each original-session group
        // Array gives O(1) indexed access; the original List made each
        // actions(i) / actions(i + 1) lookup O(n), i.e. O(n^2) per session.
        val actions: Array[ApplogBean] = tp._2.toArray.sortBy(_.timestamp)

        var newSessionId: String = UUID.randomUUID().toString

        for (i <- actions.indices) {
          // Assign the current (possibly freshly generated) split-session id.
          actions(i).splitedSessionId = newSessionId
          // If the next event is more than 30 minutes away, subsequent events
          // belong to a new split session.
          if (i < actions.length - 1 && (actions(i + 1).timestamp - actions(i).timestamp > 30 * 60 * 1000)) {
            newSessionId = UUID.randomUUID().toString
          }
        }
        actions
      }).toDS()

    // Which filesystem the path resolves to depends on fs.defaultFS (e.g. file:///)
    // sessionSplited.write.parquet("hdfs://doit01:8020/tmp/sessionSplited/2021-12-07/")
    sessionSplited.write.parquet(s"hdfs://doit01:8020/tmp/tasktmp/sessionSplited/${dt}/")

    spark.close()
  }
}
