package com.yuanshi.faceword


import java.text.SimpleDateFormat
import java.util.Date

import org.apache.commons.lang.time.DateUtils
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Computes the metrics for the 颜世界内容运营_小象 (Face World content
  * operations, 小象 app) sheet of the user & content operations Excel report,
  * one day at a time, writing each day into faceword.elephant_user_dayact.
  */
object FaceWordContentOpration {

  private val DatePattern = "yyyy-MM-dd"

  /**
    * Every date from `start` up to `end` (both inclusive), formatted with
    * [[DatePattern]]. Zero-padded yyyy-MM-dd strings order correctly under
    * plain string comparison, which `takeWhile` relies on here.
    *
    * @param start first date, yyyy-MM-dd
    * @param end   last date, yyyy-MM-dd (empty result if end < start)
    */
  private def dateRange(start: String, end: String): Seq[String] = {
    val sdf = new SimpleDateFormat(DatePattern)
    Iterator
      .iterate(start)(d => sdf.format(DateUtils.addDays(sdf.parse(d), 1)))
      .takeWhile(_ <= end)
      .toList
  }

  /**
    * Entry point. args(0) is the first partition date (yyyy-MM-dd); every
    * metric is recomputed for each day from that date through today.
    */
  def main(args: Array[String]): Unit = {
    require(args.nonEmpty, s"usage: FaceWordContentOpration <start date, $DatePattern>")

    val spark: SparkSession = SparkSession.builder()
      .enableHiveSupport()
      .config("hive.exec.dynamic.partition", true)
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .appName("FaceWordContentOpration")
      .getOrCreate()

    val dtNow: String = new SimpleDateFormat(DatePattern).format(new Date())

    // BUG FIX: the original walked a single `var dt` through four consecutive
    // while-loops without resetting it between loops, so after the first loop
    // `dt > dtNow` held and loops 2-4 never executed. Materialise the date
    // range once and iterate it afresh for every metric instead.
    val dates: Seq[String] = dateRange(args(0), dtNow)

    // NOTE(review): all four result sets below have different column counts
    // yet are inserted into the same table faceword.elephant_user_dayact —
    // confirm the intended target table(s); assumes the table is partitioned
    // by `dt` (the original unnamed partition('...') spec is invalid HiveQL).

    // Field 1: site-wide daily actives of the 小象 app (dt = operation_time).
    for (dt <- dates) {
      val res1: DataFrame = spark.sql(
        s"""
           |select
           |count(distinct device_id) as elephant_user_dayact
           |from
           |t_user_operation_log
           |where dt = '${dt}' and operation_type = 1006 and operation_content = '小象优品'
      """.stripMargin)
      // createOrReplaceTempView: plain createTempView throws
      // TempTableAlreadyExistsException on every day after the first.
      res1.createOrReplaceTempView("v_res1")
      // The view holds only the aggregate column, so the original trailing
      // `where dt = ...` filter could never resolve; the view is already
      // scoped to one day by the query above.
      spark.sql(s"insert into faceword.elephant_user_dayact partition(dt='${dt}') select * from v_res1")
    }

    // Field 2: daily actives among users who logged in within the last 7 days.
    for (dt <- dates) {
      val res2: DataFrame = spark.sql(
        s"""
           |select
           |count(distinct device_id) as user_login7_cnts
           |from
           |  (
           |    select
           |    *
           |    from
           |    t_user_operation_log t1
           |    join
           |      (
           |        select
           |        distinct case when operation_time >= date_sub('${dt}',7) then device_id end device_ids
           |        from
           |        t_user_operation_log
           |        where operation_type = 1001
           |      ) t2
           |    on t1.device_id = t2.device_ids
           |  ) t3
           |where dt = '${dt}' and operation_content = '小象优品'
      """.stripMargin)
      res2.createOrReplaceTempView("v_res2")
      spark.sql(s"insert into faceword.elephant_user_dayact partition(dt='${dt}') select * from v_res2")
    }

    // Fields 3,4,5,8,9,11,12: 颜世界 play DAU, exposure, click, ctr and read
    // metrics (source filtered once via ext2 = '').
    for (dt <- dates) {
      val res3: DataFrame = spark.sql(
        s"""
           |select
           |count(distinct if(log_type = 2,temporary_id,null)) as user_cnts_bf,
           |count(if(log_type in (1,2),1,null)) as exposure_cnts,
           |count(if(log_type = 2,1,null)) as click_cnts,
           |count(if(log_type = 2,1,null))/count(if(log_type in (1,2),1,null)) as ctr,
           |count(if(log_type in (1,2),1,null))/count(distinct(temporary_id)) as read_avg,
           |sum(play_duration)/count(distinct(temporary_id)) as time_stop_avg
           |from
           |face_value_circle_video_play_info
           |where dt = '${dt}' and ext2 = ''
      """.stripMargin)
      res3.createOrReplaceTempView("v_res3")
      // Consistent, quoted, named partition spec (original here was the
      // unquoted partition(${dt}), unlike the other three loops).
      spark.sql(s"insert into faceword.elephant_user_dayact partition(dt='${dt}') select * from v_res3")
    }

    // Cross-check: recompute the earlier fields from the joined play-log /
    // note tables so values can be compared (dt = create_time).
    for (dt <- dates) {
      val res4: DataFrame = spark.sql(
        s"""
          |select
          |count(distinct if(log_type = 2,temporary_id,null)) as user_cnts_bf,	-- 播放日活
          |count(if(log_type in (1,2),note_id,null)) as exposure_cnts,	-- 曝光
          |count(if(log_type = 2,note_id,null)) as click_cnts,	-- 点击
          |count(if(t2.note_type = 1 and t1.log_type = 2 and t1.play_duration >= t1.note_duration,t1.note_id,null))+
          |count(if(t2.note_type = 2 and t1.log_type = 2 and t1.play_duration >= 5,t1.note_id,null)) as effect_click_cnts,	-- 有效点击
          |(count(if(t2.note_type = 1 and t1.log_type = 2 and t1.play_duration >= t1.note_duration,t1.note_id,null))+
          |count(if(t2.note_type = 2 and t1.log_type = 2 and t1.play_duration >= 5,t1.note_id,null)))/count(if(log_type = 2,note_id,null)) as effect_click_rate,	-- 有效点击率
          |count(if(t1.ext1 = 1 and log_type = 2,note_id,null))/count(if(t1.ext1 = 1 and log_type in (1,2),note_id,null)) as ctr, -- ctr
          |count(if(log_type in (1,2),note_id,null))/count(distinct(temporary_id)) as read_avg, -- 人均阅读
          |count(if(log_type = 2,note_id,null))/count(distinct if(log_type = 2,temporary_id,null)) as real_read_avg,	-- 人均真实阅读
          |(count(if(t2.note_type = 1 and t1.log_type = 2 and t1.play_duration >= t1.note_duration,t1.note_id,null))+
          |count(if(t2.note_type = 2 and t1.log_type = 2 and t1.play_duration >= 5,t1.note_id,null)))/count(distinct t1.temporary_id) as effect_read_avg	-- 人均有效阅读
          |from
          |(select * from face_value_circle_video_play_info where dt = '${dt}' and ext2 = '') t1
          |join
          |(select * from face_value_circle_video where dt = '${dt}') t2
          |on t1.note_id = t2.id
      """.stripMargin)
      // BUG FIX: the original discarded this DataFrame and inserted from the
      // stale v_res3 left over from the previous loop; register and insert
      // the fresh result instead.
      res4.createOrReplaceTempView("v_res4")
      spark.sql(s"insert into faceword.elephant_user_dayact partition(dt='${dt}') select * from v_res4")
    }

    spark.close()
  }
}
