package cn.doitedu.dw_etl

import cn.doitedu.dw_utils.AttributeUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

import scala.collection.mutable.ListBuffer

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-12-19
 * @desc 归因分析 (multi-touch attribution analysis)
 */
object TargetAttribute {

  /**
   * Splits one user's time-ordered events into attribution segments.
   *
   * Each segment is the run of cause events that immediately precedes one
   * occurrence of the target event; the target event itself is not included.
   * Cause events after the LAST target event have no conversion to be
   * attributed to and are dropped. Empty segments (two consecutive target
   * events) are filtered out.
   *
   * @param sortedEvents  one user's events as (guid, eventid, ts), already
   *                      sorted ascending by timestamp
   * @param targetEventId the conversion (target) event id, e.g. "e4"
   * @return list of segments, each a non-empty list of cause events
   */
  private def segmentByTarget(
      sortedEvents: List[(Long, String, Long)],
      targetEventId: String): List[List[(Long, String, Long)]] = {

    // Trim trailing cause events that are not followed by any target event.
    // lastIndexWhere returns -1 when no target exists, so take(0) correctly
    // yields an empty list (nothing to attribute for this user).
    val lastTargetIdx = sortedEvents.lastIndexWhere(_._2 == targetEventId)
    val cleanedEvents = sortedEvents.take(lastTargetIdx + 1)

    // Accumulate cause events until a target event closes the segment.
    val segments = new ListBuffer[List[(Long, String, Long)]]
    var current = new ListBuffer[(Long, String, Long)]
    for (event <- cleanedEvents) {
      if (event._2 == targetEventId) {
        segments += current.toList
        current = new ListBuffer[(Long, String, Long)]
      } else {
        current += event
      }
    }

    // Two consecutive target events produce an empty segment — drop those.
    segments.filter(_.nonEmpty).toList
  }

  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .appName("归因分析")
      .master("local")
      .enableHiveSupport()
      .getOrCreate()


    // Select only the events the attribution model cares about.
    // Model definition:
    //   target event:  e4
    //   cause events:  e1 with p1=v2 | e2 with p1=v1 | e3 with p1=v3
    val events = spark.sql(
      """
        |
        |select
        |   guid,
        |   eventid,
        |   ts
        |from tmp.mall_app_event_dtl_orc
        |where dt='2021-12-19' and
        |   ( eventid='e4'
        |   or (eventid='e1' and properties['p1']='v2')
        |   or (eventid='e2' and properties['p1']='v1')
        |   or (eventid='e3' and properties['p1']='v3')
        |   )
        |""".stripMargin)


    events.show(100, false)

    /**
     * Sample of the filtered event data:
     * +----+-------+------+
     * |guid|eventid|ts    |
     * +----+-------+------+
     * |1   |e1     |100000|
     * |1   |e2     |100002|
     * |1   |e3     |100004|
     * |1   |e4     |100005|
     * |2   |e1     |100001|
     * |...                |
     * +----+-------+------+
     */
    val result: RDD[(Long, String, Long, Double)] = events.rdd
      .map(row => {
        val guid: Long = row.getAs[Long]("guid")
        val eventid: String = row.getAs[String]("eventid")
        val ts: Long = row.getAs[Long]("ts")
        (guid, eventid, ts)
      })
      // Group all events of one user together, then score each user's
      // attribution segments independently.
      .groupBy(_._1)
      .flatMap { case (_, userEvents) =>
        // One user's model-relevant events, sorted by timestamp, e.g.
        //   [(1,e1,100000),(1,e2,100002),(1,e3,100004),(1,e4,100005)]
        val sortedEvents: List[(Long, String, Long)] = userEvents.toList.sortBy(_._3)

        /**
         * Score every cause event in every segment. For segments
         *   [(1,e1,100000),(1,e2,100002)]
         *   [(1,e3,100004)]
         * the supported models would output:
         *   first-touch:    1,e4,e1,100%
         *   last-touch:     1,e4,e3,100%
         *   linear:         1,e4,e1,33.3% / 1,e4,e2,33.3% / 1,e4,e3,33.3%
         *   time-decay:     1,e4,e1,10%  / 1,e4,e2,20%  / 1,e4,e3,70%
         *   position-based: 1,e4,e1,40%  / 1,e4,e2,20%  / 1,e4,e3,40%
         *     (first/last 40% each, middle events share 20%)
         */
        segmentByTarget(sortedEvents, "e4")
          .flatMap(segment => AttributeUtils.timeDecayAttribute(segment))
      }



    import spark.implicits._
    result.toDF("guid", "eventid", "ts", "factor").show(100, false)


    spark.close()

  }
}
