package com.yuanshi.faceword


import java.text.SimpleDateFormat
import java.util.Date

import org.apache.commons.lang.time.DateUtils
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Computes the metrics for the "颜世界内容运营_小象" (Face World content
  * operations — Little Elephant) sheet of the user & content operations
  * Excel report, back-filling one Hive day-partition (`dt`) at a time from
  * the start date given in `args(0)` up to today.
  */
object FaceWordContentOpration {

  /** Pattern of the `dt` partition values (e.g. "2021-01-01"). */
  private val DtPattern = "yyyy-MM-dd"

  /**
    * Invokes `body` once for every date from `start` to `end`, both
    * inclusive and both formatted as yyyy-MM-dd.
    *
    * Lexicographic `<=` on the strings is safe because yyyy-MM-dd sorts
    * chronologically. SimpleDateFormat is not thread-safe, so a fresh
    * instance is created per call.
    */
  private def forEachDate(start: String, end: String)(body: String => Unit): Unit = {
    val sdf = new SimpleDateFormat(DtPattern)
    var dt = start
    while (dt <= end) {
      body(dt)
      dt = sdf.format(DateUtils.addDays(sdf.parse(dt), 1))
    }
  }

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().enableHiveSupport()
      .config("hive.exec.dynamic.partition", true)
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .appName("FaceWordContentOpration").getOrCreate()

    // First day to process (yyyy-MM-dd), supplied on the command line.
    val startDt: String = args(0)
    val dtNow: String = new SimpleDateFormat(DtPattern).format(new Date())

    // BUG FIX: the original shared one mutable `dt` across all three loops
    // without resetting it, so after the first loop `dt > dtNow` and the
    // second and third loops never ran. Each metric now iterates the full
    // [startDt, dtNow] range via forEachDate.

    // Field 1: site-wide daily active users of the Little Elephant app.
    forEachDate(startDt, dtNow) { dt =>
      val res1: DataFrame = spark.sql(
        s"""
           |select
           |count(distinct(device_id)) as elephant_user_dayact
           |from
           |t_user_operation_log
           |where dt = '${dt}'
      """.stripMargin)
      // createOrReplaceTempView: plain createTempView throws
      // TempTableAlreadyExistsException on the second loop iteration.
      res1.createOrReplaceTempView("v_res1")
      // v_res1 is a single aggregated row with no `dt` column, so the
      // original `where dt = ...` filter on it could never resolve.
      spark.sql("insert into faceword.elephant_user_dayact select * from v_res1")
    }

    // Field 2: users who newly logged in within the last 7 days and were
    // active on `dt`.
    forEachDate(startDt, dtNow) { dt =>
      val res2: DataFrame = spark.sql(
        s"""
           |select
           |count(distinct(device_id)) as `7cnts`
           |from
           |  (
           |    select
           |    *
           |    from
           |    t_user_operation_log t1
           |    join
           |      (
           |        select
           |        case when operation_time >= date_sub('${dt}',7) and operation_time <= current_date() then device_id end device_ids
           |        from
           |        t_user_operation_log
           |        where operation_type = '1001'
           |      ) t2
           |    on t1.device_id = t2.device_ids
           |  ) t3
           |where dt = '${dt}'
      """.stripMargin)
      res2.createOrReplaceTempView("v_res2")
      spark.sql("insert into faceword.elephant_user_dayact select * from v_res2")
    }

    // Fields 3-12: Face World play DAU, exposures, clicks, effective
    // clicks (and rate), CTR, and the per-user read/duration averages.
    forEachDate(startDt, dtNow) { dt =>
      // NOTE(review): the inner query mixes aggregates with the
      // non-aggregated `case when ... end temporary_id_bf` and has no
      // GROUP BY; Spark SQL will reject this. The intended grouping needs
      // to be confirmed with the metric owner before it can be fixed here.
      val res3: DataFrame = spark.sql(
        s"""
           |select
           |count(distinct(temporary_id_bf)) as show_cnts_bf,
           |exposure_cnts,
           |click_cnts,
           |effect_click_cnts,
           |effect_click_cnts/click_cnts as effect_click_rate,
           |click_cnts/exposure_cnts as ctr,
           |exposure_cnts/user_cnts as read_avg,
           |click_cnts/user_cnts as real_read_avg,
           |effect_click_cnts/user_cnts as effect_read_avg,
           |time_stop_total/user_cnts as time_use_avg
           |from
           |(
           |  select
           |  count(distinct(temporary_id)) as user_cnts,
           |  case when log_type = 2 then temporary_id end temporary_id_bf,
           |  count(if(log_type in (1,2),1,null)) as exposure_cnts,
           |  count(if(log_type = 2,1,null)) as click_cnts,
           |  count(if(play_duration >= 5,1,null)) as effect_click_cnts,
           |  sum(note_duration) as time_stop_total
           |  from
           |  face_value_circle_video_play_info
           |  where dt = '${dt}' and ext2 = ''
           |) t1
      """.stripMargin)
      res3.createOrReplaceTempView("v_res3")
      // partition(dt = '...'): the original `partition(${dt})` expanded to
      // e.g. partition(2021-01-01), which is not valid partition-spec
      // syntax. As with v_res1, the aggregated view has no `dt` column to
      // filter on.
      spark.sql(s"insert into faceword.elephant_user_dayact partition(dt = '${dt}') select * from v_res3")
    }

    spark.close()
  }
}
