package dm

import java.text.SimpleDateFormat
import java.util.Calendar

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object dm_daoliu_app {

  /**
   * Daily batch job: measures "daoliu" (traffic diversion) from the PC/WAP
   * site to the APP.
   *
   * For yesterday's partition it computes, per user_type and date:
   *   - dl:     new APP users that day who were active on PC/WAP the same day
   *             and had no APP activity in the historical window
   *   - pc_cnt: users active on PC/WAP that day
   *   - cnt:    PC/WAP-active users excluding those with historical APP activity
   * and writes the result into tmp.dm_daoliu_app (partitioned by dt).
   *
   * @param args unused; the run date is always yesterday (system clock).
   */
  def main(args: Array[String]): Unit = {
    // Business date for this run: yesterday, formatted yyyy-MM-dd.
    // (SimpleDateFormat is not thread-safe, but it is confined to this method.)
    val dateFormat: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")
    val cal: Calendar = Calendar.getInstance()
    cal.add(Calendar.DATE, -1)
    val yesterday: String = dateFormat.format(cal.getTime())

    val conf: SparkConf = new SparkConf().setAppName("dm.dm_daoliu_app")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // Base query: users active on PC/WAP on $dt, joined against their valid
    // dim_user_affiliated_info record; flag=1 means the user had NO APP
    // activity in [2019-08-01, 2020-09-16). $dt is substituted below.
    val tmp_a =
      """
        SELECT
          T.event_subject ,
          T.event_time,
          t.user_type,
          case when t2.event_subject is null  then  1 else 0 end as flag
         FROM
        (
               SELECT
                  distinct
                  t.user_id as event_subject ,
                  t.dt as event_time ,
                  t3.user_status as user_type
               from
                  dws.dws_event_user_active t
               left join dim.dim_user_affiliated_info t3 on t.user_id =t3.user_id
               where   t.event_time >= t3.start_time and   t.event_time <= t3.end_time
                      and   t.dt = '$dt'
             and t.source_system IN ('PC','WAP') --PC_M活跃的 人

          )T   -- pcm站 当天活跃的人数
          LEFT JOIN
          (

           SELECT DISTINCT   t.user_id as event_subject  from
               dws.dws_event_user_active t where dt<'2020-09-16'    and t.dt >='2019-08-01'
             and t.source_system ='APP'
           )t2
           on t.event_subject = t2.event_subject
        """.replace("$dt", yesterday)

    // Final aggregation over the tmp_a temp view; inserts one partition.
    val insert_sql =
      """
         insert overwrite table  tmp.dm_daoliu_app partition(dt='$dt')
          SELECT
             ttt.event_time ,  -- as `日期`,
             ttt.user_type, --  as `身份`,
             ttt.dl,   --  `导流到app人数`,
             tt3.cnt   as pc_cnt, --   `pcm活跃人数`,
             tt4.cnt --  as  `pcm活跃人数排除app人数`
          FROM

          (
               SELECT
               tt2.event_time  ,
               tt2.user_type  ,
               count(DISTINCT tt2.dl) dl

              FROM
             (
               SELECT
               DISTINCT t.user_id as event_subject
               from
                       dws.dws_event_user_base t
                       where t.dt= '$dt'
                       and t.source_system='APP'
              )tt  -- tt 表 是 app 当天新增的人数

             join (
                  SELECT
                       t.event_subject ,
                       t.event_time,
                       t.user_type,
                       t.event_subject as dl
                    FROM  tmp_a t   -- 从上表pcm活跃人数中抛出 统计日期之前在 app有行为的人数
                     where flag=1
              )tt2  on tt.event_subject = tt2. dl   -- tt2 表示 当天app 新增人数中 pcm导流过来的 数据
              GROUP BY tt2.user_type , tt2.event_time
             )ttt
             left join
             (
                     SELECT    event_time,
                        count(distinct  event_subject) cnt  ,
                         user_type
                       from  tmp_a
                                 GROUP BY  event_time,  user_type

             )tt3 on ttt.event_time = tt3.event_time and ttt.user_type = tt3.user_type -- pcm站当天活跃人数
             left join
             (
                SELECT  t.event_time,
                 count(distinct t.event_subject) as cnt ,
                   t.user_type
               FROM  tmp_a t
               where t.flag=1
                  GROUP BY t.event_time,  t.user_type
             )tt4 -- pcm站当天活跃人数抛出历史在app有行为人数
              on ttt.event_time = tt4.event_time and ttt.user_type = tt4.user_type
        """.replace("$dt", yesterday)

    print(tmp_a)
    print("····························正式开始执行························")

    // 1. Clear the whole target table (the insert below only overwrites one
    //    partition, so the truncate removes any stale partitions as well).
    spark.sql("truncate table tmp.dm_daoliu_app")

    // 2. Execution parameters.
    //    NOTE(review): the original set "hive.exec.dynamici.partition" (typo,
    //    extra 'i') which is not a real Hive property and was silently ignored.
    spark.sql("set hive.exec.dynamic.partition=true")
    spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")
    spark.sql("set spark.sql.adaptive.enabled=true")
//    spark.sql("set spark.sql.shuffle.partitions=2000")

    // 3. Register the base query as temp view tmp_a, then run the insert.
    val tmp_a_df: DataFrame = spark.sql(tmp_a)
    tmp_a_df.createOrReplaceTempView("tmp_a")
    spark.sql(insert_sql)
    spark.stop()
  }
}
