package com.yuanshi.faceword

import java.text.SimpleDateFormat
import java.util.Date

import org.apache.commons.lang.time.DateUtils
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Computes the metrics for the "Yan World content type" section of the
  * user & content operations Excel report (video vs. image-text notes).
  */
object FaceWordCommentType {

  /**
    * Entry point. Expects `args(0)` = start date in `yyyy-MM-dd` format.
    * For every day from the start date up to today (inclusive), and for each
    * note_type in {1, 2} (the two content types), computes exposure / VV / UV /
    * CTR / per-user read metrics and appends one row per (day, type) into
    * `faceword.faceword_video_target`.
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .enableHiveSupport()
      .config("hive.exec.dynamic.partition", true)
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // NOTE(review): app name says "UserData" but this job computes content-type
      // metrics — confirm whether "FaceWordCommentType" was intended.
      .appName("FaceWordUserData")
      .getOrCreate()

    require(args.nonEmpty, "expected args(0) = start date in yyyy-MM-dd format")

    var dt: String = args(0)
    val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")
    val dtNow: String = sdf.format(new Date())

    // Day-by-day backfill loop. Lexicographic comparison is safe because
    // yyyy-MM-dd strings sort chronologically.
    while (dt <= dtNow) {
      // note_type 1 and 2 are the two content variants (e.g. video / image-text).
      for (i <- 1 to 2) {
        val res1: DataFrame = spark.sql(
          s"""
            |select
            |count(if(log_type in (1,2),t1.note_id,null)), 	-- 视频/图文内容曝光
            |count(if(log_type = 2,t1.note_id,null)),		-- 视频/图文VV
            |count(distinct if(log_type = 2,temporary_id,null)),		-- 视频/图文UV
            |count(if(log_type = 2,1,null))/count(if(log_type in (1,2),1,null)),		-- 视频/图文ctr
            |count(if(log_type = 2,t1.note_id,null))/count(distinct temporary_id), -- 视频/图文人均阅读量/时长
            |count(if(log_type = 2,t1.note_id,null))/count(distinct if(log_type = 2,temporary_id,null)) -- 视频/图文人均真实阅读/时长
            |from
            |(select * from face_value_circle_video_play_info where dt = '${dt}' and ext2 = '') t1
            |join
            |(select * from face_value_circle_video where dt = '${dt}' and note_type = ${i} ) t2
            |on t1.temporary_id = t2.id
          """.stripMargin)

        // BUG FIX: createTempView throws TempTableAlreadyExistsException on the
        // second date iteration because v_res1/v_res2 already exist; use
        // createOrReplaceTempView so the loop can run over a date range.
        res1.createOrReplaceTempView(s"v_res${i}")

        // The original if (i == 1) / else branches executed the exact same
        // statement, so the conditional was dead code — both note types are
        // appended to the same target table.
        spark.sql(s"insert into faceword.faceword_video_target select * from v_res${i}")
      }

      // Advance to the next day and loop again.
      dt = sdf.format(DateUtils.addDays(sdf.parse(dt), 1))
    }

    spark.close()
  }
}
