package com.doit.dw.dws

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.collection.immutable
import scala.collection.mutable.ListBuffer

/**
 * @Date: 23.6.14
 * @Author: Hang.Nian.YY
 * @qq: 598196583
 * @Tips: 学大数据 ,到多易教育 (Learn big data at Duoyi Education)
 * @Description: Multi-touch attribution analysis over funnel events from Hive:
 *               first-touch, last-touch, linear, U-shaped and time-decay models.
 */
object Attribution {
  // Silence Spark's verbose INFO logging before the session is created.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
   * Entry point.
   *
   * Reads funnel events from Hive, slices each user's time-ordered event
   * stream into attribution windows — one window per occurrence of the
   * target event "e4" — and applies several attribution models to the
   * touch events inside each window: first-touch, last-touch, linear,
   * U-shaped and time-decay.  Finally aggregates the time-decay weights
   * per event id as that event's overall contribution to conversions.
   */
  def main(args: Array[String]): Unit = {
    System.setProperty("HADOOP_USER_NAME", "root")
    val session = SparkSession.builder()
      .master("local[*]")
      .appName(this.getClass.getSimpleName)
      .enableHiveSupport()
      .getOrCreate()

    // Select the target event e4 (with its qualifying property) plus the
    // candidate touch events, restricted to users who completed e4 at
    // least once (the join with subquery y).
    val frame = session.sql(
      """
        |with  x as (select guid,
        |                   event_id,
        |                   ts
        |            from doe39.test_dwd_app_event_funnel
        |            where (event_id = 'e4' and properties['p2'] = 'v2')
        |               or (event_id = 'e1' and properties['p1'] = 'v1')
        |               or (event_id = 'e3' and properties['p2'] = 'v2')
        |               or event_id = 'e2'
        |               or event_id = 'e5')  ,
        |y as (
        |    select
        |    guid
        |    from
        |    doe39.test_dwd_app_event_funnel
        |    where event_id = 'e4' and properties['p2'] ='v2'
        |    group by guid
        |    )
        |select
        |    x.guid ,
        |    x.event_id ,
        |    x.ts
        |    from x join  y
        |on  x.guid = y.guid
        |""".stripMargin)

    // Row -> (guid, event_id, ts)
    val rdd: RDD[(Long, String, Long)] = frame.rdd.map { row =>
      (row.getAs[Long]("guid"), row.getAs[String]("event_id"), row.getAs[Long]("ts"))
    }

    // Group by user, sort each user's events by timestamp, then split the
    // stream on every target event "e4": the touch events before each e4
    // form one attribution window.  Empty windows (two consecutive e4
    // conversions with no touch event in between) are skipped — they carry
    // no attributable events and would otherwise produce degenerate
    // weights (e.g. 1/0 in the linear model).
    val eventSeq: RDD[ListBuffer[(Long, String, Long)]] = rdd
      .groupBy(_._1)
      .flatMap { case (_, events) =>
        val sorted = events.toList.sortBy(_._3)
        val windows = ListBuffer[ListBuffer[(Long, String, Long)]]()
        var current = ListBuffer[(Long, String, Long)]()
        for (ev <- sorted) {
          if (ev._2 == "e4") {
            // Target reached: close the current window and start a new one.
            if (current.nonEmpty) windows.append(current)
            current = ListBuffer[(Long, String, Long)]()
          } else {
            current.append(ev)
          }
        }
        windows
      }

    // -------------------------- first-touch attribution --------------------
    // Example windows:
    //   ListBuffer((1,e2,12), (1,e3,13))
    //   ListBuffer((1,e2,15), (1,e1,16))
    //   ListBuffer((3,e2,12), (3,e5,14))
    //   ListBuffer((2,e2,12), (2,e3,13), (2,e5,14), (2,e2,15), (2,e1,16))
    val res1: RDD[IndexedSeq[(Long, String, Long, Int)]] = firstTouchAttr(eventSeq)

    // -------------------------- last-touch attribution ----------------------
    // The last event of each window gets weight 1, all others 0.
    val res2: RDD[immutable.IndexedSeq[(Long, String, Long, Int)]] = eventSeq.map { ls =>
      for (i <- 0 until ls.size) yield {
        val (guid, eventId, ts) = ls(i)
        (guid, eventId, ts, if (i == ls.size - 1) 1 else 0)
      }
    }

    // -------------------------- linear attribution --------------------------
    // Every event in a window gets the same weight 1/n.
    val res3: RDD[immutable.IndexedSeq[(Long, String, Long, Double)]] = eventSeq.map { ls =>
      val weight = 1.0 / ls.size
      for (i <- 0 until ls.size) yield {
        val (guid, eventId, ts) = ls(i)
        (guid, eventId, ts, weight)
      }
    }

    // -------------------------- U-shaped attribution ------------------------
    // Events near the edges of the window weigh more than those in the
    // middle: score(i) = |i - (n + 1 - i)|, normalized by the score sum.
    // NOTE: for odd n the exact middle event scores 0 by this scheme.
    val res4: RDD[ListBuffer[(Long, String, Long, Double)]] = eventSeq.map { ls =>
      val forward = 1 to ls.size
      val scores = forward.zip(forward.reverse).map { case (a, b) => Math.abs(a - b) }
      val total = scores.sum.toDouble
      // A single-event window scores 0/0; give that lone event full
      // credit instead of producing NaN.
      val rates =
        if (total == 0.0) scores.map(_ => 1.0)
        else scores.map(_ / total)
      ls.zip(rates).map { case ((guid, eventId, ts), r) => (guid, eventId, ts, r) }
    }

    // -------------------------- time-decay attribution ----------------------
    // Later events (closer to the conversion) weigh more:
    // weight(i) = i / (1 + 2 + ... + n).
    val res5: RDD[ListBuffer[(Long, String, Long, Double)]] = eventSeq.map { ls =>
      val positions = 1 to ls.size
      val total = positions.sum.toDouble
      ls.zip(positions).map { case ((guid, eventId, ts), p) => (guid, eventId, ts, p / total) }
    }

    // ------------------------------------------------------------------------
    // The models above yield per-window weights, e.g.
    //   ListBuffer((2,e2,12,0.066..), (2,e3,13,0.133..), (2,e5,14,0.2),
    //              (2,e2,15,0.266..), (2,e1,16,0.333..))
    // i.e. each channel/event's contribution to ONE conversion of user 2.
    // To measure each channel's OVERALL contribution to the target event,
    // flatten the time-decay weights and sum them per event id.
    val weighted: RDD[(Long, String, Long, Double)] = eventSeq.flatMap { ls =>
      val positions = 1 to ls.size
      val total = positions.sum.toDouble
      ls.zip(positions).map { case ((guid, eventId, ts), p) => (guid, eventId, ts, p / total) }
    }

    val contribution = weighted
      .groupBy(_._2)
      .map { case (eventId, recs) => (eventId, recs.map(_._4).sum) }
      .sortBy(-_._2)

    // Trigger the job: Spark transformations are lazy, so without a
    // terminal action nothing above would actually execute.
    contribution.foreach(println)

    session.close()
  }

  /**
   * First-touch attribution: within each attribution window the first
   * event receives weight 1 and all later events weight 0.
   *
   * @param eventSeq one ListBuffer per completed target event, holding the
   *                 (guid, event_id, ts) touch events that preceded it
   * @return the same windows with a 0/1 first-touch weight appended
   */
  def firstTouchAttr(eventSeq: RDD[ListBuffer[(Long, String, Long)]]): RDD[IndexedSeq[(Long, String, Long, Int)]] = {
    eventSeq.map { ls =>
      for (i <- 0 until ls.size) yield {
        val (guid, eventId, ts) = ls(i)
        (guid, eventId, ts, if (i == 0) 1 else 0)
      }
    }
  }
}
