package cn.doitedu.dw.attribute

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

import scala.collection.{immutable, mutable}
import scala.collection.mutable.ListBuffer

/**
 * 归因分析
 */
object AttributeAnalysis {

  /**
   * Entry point: runs several marketing-attribution analyses (first-touch,
   * position-based and time-decay) over a DWD-layer event detail table.
   *
   * Target (conversion) event: e6; attributable events: e1/e3/e5.
   * Reads `data/attr/attr.csv` with columns (guid, eventid, timestamp),
   * prints each analysis result to stdout, then closes the session.
   */
  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.ERROR)
    val spark = SparkSession.builder()
      .appName("归因分析")
      .master("local")
      .enableHiveSupport()
      .getOrCreate()


    // Load the DWD-layer event detail table
    val detail = spark.read.csv("data/attr/attr.csv").toDF("guid", "eventid", "timestamp")
    detail.createTempView("detail")

    // Pre-processing: collect each user's relevant events into one
    // time-sorted list; keep only users who reached the target event e6.
    val tmp = spark.sql(
      """
        |
        |SELECT
        |  guid,
        |  sort_array(collect_list(concat_ws('_',timestamp,eventid) )) as event_list
        |FROM detail
        |WHERE eventid in ('e1','e3','e5','e6')
        |GROUP BY guid
        |HAVING  array_contains(collect_list(eventid),'e6')
        |
        |""".stripMargin)
    tmp.createTempView("tmp")

    /**
     * +----+------------------------------------------+
     * |guid|event_list                                |
     * +----+------------------------------------------+
     * |g02 |[1_e3, 2_e1, 3_e1, 4_e1, 5_e5, 6_e6]      |
     * |g01 |[1_e1, 2_e3, 3_e1, 5_e3, 6_e1, 8_e6, 9_e5]|
     * +----+------------------------------------------+
     */


    /**
     * First-touch attribution.
     *
     * Target event:       e6
     * Attributable events: e1 e3 e5
     * Example sequence:   e1 e3 e1 e1 e3 e5 e5 e6 e3 e5 e6
     *
     * The event list is cut into one segment per conversion (each segment
     * ends with e6); the earliest event of each segment receives 100%.
     *
     * NOTE(review): `attrEvents` is currently unused — attributable events
     * are pre-filtered in the SQL WHERE clause above.
     */
    val findFirstAttr = (eventList: mutable.WrappedArray[String], attrEvents: mutable.WrappedArray[String]) => {
      // strip the "timestamp_" prefix, keeping only event ids
      val events = eventList.map(s => s.split("_")(1))

      val segments = cutEvents(events.toArray, "e6")
      // a segment whose first element is already e6 has no touch to credit
      segments.collect {
        case seg if !seg(0).equals("e6") => (seg(0), 100.0)
      }
    }

    spark.udf.register("find_first_attr", findFirstAttr)
    val firstAttr = spark.sql(
      """
        |select
        |guid,
        |find_first_attr(event_list,array('e1','e3','e5')) as first_attr
        |from tmp
        |
        |""".stripMargin)

    firstAttr.show(100, false)


    /**
     * Last-touch attribution (not yet implemented).
     *
     * Target event:        e6
     * Attributable events: e1 e3 e5
     * Example sequence:    e1 e3 e1 e1 e3 e5 e5 e6
     */


    /**
     * Linear attribution (not yet implemented).
     *
     * Target event:        e6
     * Attributable events: e1 e3 e5
     * Example sequence:    e1 e3 e5 e1 e3 e5 e5 e6
     * g01    map{e1->33,e5->33,e3->33}
     * g02    map{e1->50,e5->50}
     * g03    map{e3->100}
     */


    /**
     * Position-based (U-shaped) attribution.
     *
     * Target event:        e6
     * Attributable events: e1 e3 e5 e2
     *
     * Weighting over the deduplicated events preceding the first conversion:
     *   1 event        -> 100%
     *   2 events       -> 50% / 50%
     *   3 or more      -> 40% first touch, 40% last touch,
     *                     remaining 20% split evenly among the middle.
     *
     * NOTE(review): `attrEvents` is unused here as well (see above).
     */
    val locationAttr = (eventList: mutable.WrappedArray[String], attrEvents: mutable.WrappedArray[String]) => {
      val strings = eventList.map(s => s.split("_")(1))
      val events = new ListBuffer[String]()

      val targetEventIndex = strings.indexOf("e6")

      // Deduplicate, walking backwards from the conversion so that the
      // occurrence closest to the conversion is the one kept.
      for (i <- 0 until targetEventIndex) {
        if (!events.contains(strings(targetEventIndex - i - 1))) events += strings(targetEventIndex - i - 1)
      }

      // events(0) is the last touch, events(size-1) the first touch.
      val tuples = new ListBuffer[(String, Double)]()
      if (events.size == 1) {
        tuples += ((events(0), 100.0))
      } else if (events.size == 2) {
        // BUGFIX: was 40/40, which only sums to 80%; weights must total 100%.
        tuples += ((events(0), 50.0))
        tuples += ((events(1), 50.0))
      } else if (events.size > 2) {
        tuples += ((events(0), 40.0))
        tuples += ((events(events.size - 1), 40.0))
        for (i <- 1 to events.size - 2) {
          tuples += ((events(i), 20.0 / (events.size - 2)))
        }
      }

      tuples.toMap

    }

    spark.udf.register("location_attr", locationAttr)
    spark.sql(
      """
        |select
        |guid,
        |location_attr(event_list,array('e1','e3','e5')) as loc_attr
        |from tmp
        |
        |""".stripMargin).show(100, false)

    /**
     * Time-decay attribution.
     *
     * Target event:         e6
     * Attributable sequence: e1     e3      e5         e2
     *                        1    0.9   0.9*0.9      0.9*0.9*0.9
     *
     * e5's share:  0.81/(1+0.9+0.9*0.9+0.9*0.9*0.9)
     *
     * One weight map is produced per conversion segment; within a segment
     * the event closest to the conversion gets weight 1, and each step
     * further back decays by a factor of 0.9.
     */
    val timeDecay = (eventList: mutable.WrappedArray[String], attrEvents: mutable.WrappedArray[String]) => {
      val events = eventList.map(s => s.split("_")(1))

      val segments = cutEvents(events.toArray, "e6")
      for (segment <- segments) yield {

        val deduped = new ListBuffer[String]()
        val targetEventIndex = segment.indexOf("e6")

        // Deduplicate, keeping the occurrence closest to the conversion;
        // deduped(0) ends up being the nearest event.
        for (i <- 0 until targetEventIndex) {
          val ev = segment(targetEventIndex - i - 1)
          if (!deduped.contains(ev)) deduped += ev
        }

        // Geometric decay weights: 1, 0.9, 0.81, ...
        val weightArray = for (i <- 0 until deduped.size) yield Math.pow(0.9, i)
        // Hoisted out of the loop below (was recomputed per element: O(n^2)).
        val weightSum = weightArray.sum

        val tuples = for (i <- 0 until deduped.size) yield {
          (deduped(i), (weightArray(i) * 100.0) / weightSum)
        }

        tuples.toMap
      }
    }


    spark.udf.register("time_decay", timeDecay)
    spark.sql(
      """
        |select
        |guid,
        |time_decay(event_list,array('e1','e3','e5')) as time_decayed_attr
        |from tmp
        |
        |""".stripMargin).show(100, false)


    spark.close()

  }

  /**
   * Splits `arr` into consecutive segments, each segment ending with an
   * occurrence of the target event `ev` (inclusive). Events after the last
   * occurrence of `ev` are dropped; an input without `ev` yields no segments.
   */
  def cutEvents(arr: Array[String], ev: String) = {
    val segments = new ListBuffer[Array[String]]
    var start = 0
    // positions of every occurrence of the target event
    for (end <- arr.indices if arr(end).equals(ev)) {
      segments += arr.slice(start, end + 1)
      start = end + 1
    }
    segments
  }
}
