package cn.doitedu

import org.apache.parquet.format.IntType
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer


/** One page-view event. `guid` is the user id, `session_id` groups one visit,
  * `action_time` orders events within the session, `url` is the page viewed and
  * `ref` is the referrer page's url (null for the session's first page — see the
  * sample data in Job03_PagePVContribution). */
case class PageView(guid: Long, session_id: String, event_id: String, action_time: Long, url: String, ref: String)
/** A PageView plus mutable child links: a node of the per-session referral tree
  * built in Job03_PagePVContribution (children = page views this view led to). */
case class TreeNode(guid: Long, session_id: String, event_id: String, action_time: Long, url: String, ref: String, children: ListBuffer[TreeNode])



/**
 * Computes, per page url, how many page views it generated directly (pages the
 * user navigated to from it) and in total (its whole downstream subtree),
 * by rebuilding each session's click-referral tree from (url, ref) pairs.
 */
object Job03_PagePVContribution {

  def main(args: Array[String]): Unit = {

    // NOTE(review): enableHiveSupport() needs Hive classes on the classpath even
    // though no Hive table is queried here; kept to preserve existing behavior.
    val spark = SparkSession.builder()
      .appName("page_contribution")
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    val sc = spark.sparkContext

    // Sample data: two users, one session each; ref == null marks the entry page.
    val rdd = sc.makeRDD(Seq(
      PageView(1, "s01", "page_load", 1000, "/a", null),
      PageView(1, "s01", "page_load", 2000, "/b", "/a"),
      PageView(1, "s01", "page_load", 3000, "/c", "/a"),
      PageView(1, "s01", "page_load", 4000, "/d", "/a"),
      PageView(1, "s01", "page_load", 5000, "/e", "/c"),
      PageView(1, "s01", "page_load", 6000, "/a", "/c"),
      PageView(1, "s01", "page_load", 7000, "/y", "/a"),
      PageView(1, "s01", "page_load", 8000, "/w", "/a"),

      PageView(2, "s02", "page_load", 1000, "/a", null),
      PageView(2, "s02", "page_load", 2000, "/x", "/a"),
      PageView(2, "s02", "page_load", 3000, "/y", "/a"),
      PageView(2, "s02", "page_load", 4000, "/d", "/a"),
      PageView(2, "s02", "page_load", 5000, "/e", "/d"),
      PageView(2, "s02", "page_load", 6000, "/a", "/d"),
      PageView(2, "s02", "page_load", 7000, "/b", "/e"),
      PageView(2, "s02", "page_load", 8000, "/c", "/e"),
    ))

    // Per (user, session): rebuild the referral tree of its page views and emit
    // one (url, direct contribution, total contribution) row per view.
    val resultRdd: RDD[(String, Int, Int)] = rdd
      .groupBy(pv => (pv.guid, pv.session_id))
      .flatMapValues { iter =>
        // Nodes in DESCENDING action_time order. A Vector gives O(1) indexed
        // access — the original List made every nodes(i)/nodes(j) lookup O(n).
        val nodes: Vector[TreeNode] = iter.toVector
          .sortBy(pv => -pv.action_time)
          .map(pv => TreeNode(pv.guid, pv.session_id, pv.event_id, pv.action_time, pv.url, pv.ref, ListBuffer.empty))

        // Link each node to its parent: the first element AFTER it in the list
        // (i.e. the most recent EARLIER event) whose url equals this node's ref.
        // The last element (earliest event) is the root and is never a child,
        // hence `until nodes.length - 1`.
        for (i <- 0 until nodes.length - 1) {
          val cur = nodes(i)
          // Guard: a null ref on a non-root node (malformed data) would have
          // NPE'd in the original `cur.ref.equals(...)`.
          if (cur.ref != null) {
            val parentIdx = nodes.indexWhere(n => cur.ref == n.url, i + 1)
            if (parentIdx >= 0) nodes(parentIdx).children += cur
            // parentIdx == -1: referrer never seen in this session; the node
            // stays detached and contributes nothing (same as before).
          }
        }

        // Earliest event of the session = root of the whole tree.
        // (Replaces the original `nodeLst.reverse.head` double reversal.)
        val rootNode: TreeNode = nodes.last

        // Accumulator: one (url, direct contribution, total contribution) row
        // per tree node, filled by the post-order walk in calcContribute.
        val results = ListBuffer.empty[(String, Int, Int)]
        calcContribute(rootNode, results)
        results
      }
      .map(_._2) // drop the (guid, session_id) key

    /**
     * The rows above are per user/session; the final report is aggregated per
     * page url: view count plus summed direct and total contributions.
     */
    val df = spark.createDataFrame(resultRdd)
    df.createOrReplaceTempView("res")

    // Aggregate per page; count(1) is the page's view count because each tree
    // node (i.e. each page view) produced exactly one row.
    spark.sql(
      """
        |
        |select
        |  _1 as url,
        |  count(1) as self_pv,
        |  sum(_2) as direct_contribution,
        |  sum(_3) as whole_contribution
        |from res
        |group by _1
        |
        |""".stripMargin).show()

  }

  /**
   * Post-order walk of a session tree computing each page view's contribution.
   *
   * Direct contribution = number of children (views navigated to from this
   * page); total contribution = direct + sum of the children's totals, i.e. the
   * size of the subtree hanging below this node.
   *
   * @param node    subtree root to score
   * @param results accumulator receiving one (url, direct, total) row per node
   * @return this node's total contribution
   */
  def calcContribute(node: TreeNode, results: ListBuffer[(String, Int, Int)]): Int = {
    val direct = node.children.length
    if (direct == 0) {
      // Leaf: this view led to no further views.
      results += ((node.url, 0, 0))
      0
    } else {
      val childTotal = node.children.map(calcContribute(_, results)).sum
      results += ((node.url, direct, direct + childTotal))
      direct + childTotal
    }
  }

}
