package cn.doitedu.datayi.etl

import org.apache.commons.lang3.StringUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

import scala.collection.mutable

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-08-12
 * @desc 事件归因分析
 */
object EventAttribute {

  /** The conversion (target) event that attribution credit is computed for. */
  private val TargetEvent = "fetchCoupon"

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession.builder()
      .appName("事件归因分析")
      .enableHiveSupport()
      .master("local")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import spark.implicits._

    // One row per user: guid -> time-ordered list of "timestamp_eventid" strings, e.g.
    //   g01, ["1_e1","2_e1","3_e2","4_X","5_e1","7_e1","8_X","9_e2"]
    // NOTE(review): sort_array sorts lexicographically, which is only a correct time
    // ordering if all timestamps have the same string width (e.g. epoch millis) —
    // confirm against the upstream table.
    val events: DataFrame = spark.sql(
      s"""
        |select
        |  guid,
        |  sort_array(collect_list(concat_ws('_',timestamp,eventid))) as events
        |from dwd23.app_event_detail
        |where dt='2021-08-04'
        |  and eventid in ('$TargetEvent', 'adShow', 'productView', 'addCart')
        |group by guid
        |""".stripMargin)

    // Split each user's event sequence into attribution windows: every occurrence of
    // the target event closes a window containing the events that preceded it, e.g.
    //   g01, ["1_e1","2_e1","3_e2","4_X","5_e1","7_e1","8_X","9_e2"]
    //     -> (g01, X, "e1,e1,e2"), (g01, X, "e1,e1")
    // Events after the LAST occurrence of the target event led to no conversion and
    // are discarded (previously they were wrongly kept and attributed).
    val rdd: RDD[(Long, String, String)] = events.rdd.flatMap(row => {
      val guid = row.getAs[Long](0)
      // Arrays in a DataFrame surface as mutable.WrappedArray in the RDD API.
      val sortedEvents = row.getAs[mutable.WrappedArray[String]]("events")
      // Keep only the event name; the "timestamp_" prefix existed solely for ordering.
      val names = sortedEvents.map(_.split("_")(1))

      // Left fold: `done` accumulates finished windows, `open` is the window being
      // built (kept reversed for O(1) prepend). A target event closes the open window.
      val (done, _) = names.foldLeft((List.empty[List[String]], List.empty[String])) {
        case ((closed, open), TargetEvent) => (open.reverse :: closed, Nil)
        case ((closed, open), name)        => (closed, name :: open)
      }

      done.reverse
        .filter(_.nonEmpty) // a conversion with no preceding events carries no credit
        .map(window => (guid, TargetEvent, window.mkString(",")))
    })

    // Apply the attribution model (linear) and surface the weighted result.
    val resultRdd = linearAttribute(rdd)

    val res = resultRdd.toDF("strategy", "guid", "dest_event", "attr_event", "weight")

    res.show(100, false)

    spark.close()
  }

  /**
   * Linear attribution: every event preceding the conversion receives an equal
   * share of the credit.
   *
   * Example: (g01, "X", "addCart,adclickc") yields
   *   ("线性归因", g01, "X", "addCart", 50.0)
   *   ("线性归因", g01, "X", "adclickc", 50.0)
   *
   * @param rdd (guid, destination event, comma-joined preceding event names)
   * @return (strategy name, guid, destination event, attributed event, weight in percent)
   */
  def linearAttribute(rdd: RDD[(Long, String, String)]): RDD[(String, Long, String, String, Double)] = {
    rdd.flatMap { case (guid, destEvent, events) =>
      val eventArr = events.split(",").filter(s => StringUtils.isNotBlank(s))
      // Equal share per contributing event; hoisted out of the per-element map.
      val weight = 100.0 / eventArr.length
      eventArr.map(e => ("线性归因", guid, destEvent, e, weight))
    }
  }

}
