package com.niit.session

import com.niit.commons.conf.ConfigurationManager
import com.niit.commons.constant.Constants
import com.niit.commons.model._
import com.niit.commons.utils._
import net.sf.json.JSONObject
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}

import java.io.FileInputStream
import java.util.{Date, Properties, UUID}
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.Random


object SessionStat {
  def main(args: Array[String]): Unit = {
    // 1. Build the SparkSession (local mode, Hive support for the commerce warehouse).
    // NOTE(review): the original also created a SparkConf that was never used —
    // SparkSession.builder() below carries the same master/appName settings — so
    // the dead value has been removed.
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("session")
      .enableHiveSupport()
      .getOrCreate()

    // 2. Load the task parameters (JSON query conditions) from the properties file.
    // NOTE(review): the absolute Windows path makes this non-portable — consider
    // loading the resource from the classpath instead.
    val properties: Properties = {
      val p = new Properties()
      p.load(new FileInputStream("F:\\javatest\\offline\\src\\main\\resources\\commerce.properties"))
      p
    }
    val conditionJson: String = properties.getProperty("task.params.json")

    // 3. Fetch the user-visit actions that fall inside the requested date range.
    val conditionObj: JSONObject = JSONObject.fromObject(conditionJson)
    val userVisitAction: RDD[UserVisitAction] = getActionRDDByDateRange(spark, conditionObj)
    // Debug output only; triggers a Spark action.
    userVisitAction.foreach(println)

    // 4. Key each action by its session id: RDD[(session_id, UserVisitAction)].
    val session2ActionRDD: RDD[(String, UserVisitAction)] = userVisitAction.map(item => (item.session_id, item))

    // 5. Create and register the custom accumulator that tallies sessions per
    //    visit-length / step-length bucket while filtering runs.
    val sessionAccumlator: SessionAccumlator = new SessionAccumlator
    spark.sparkContext.register(sessionAccumlator)

    // 6. Aggregate each session's actions into one "fullInfo" string joined with
    //    the user's profile.
    val sessionid2FullActionRDD = aggregateBySession(spark, session2ActionRDD)

    // 7. Filter sessions by the task conditions (age, city, keywords, ...); the
    //    accumulator is updated as a side effect of the filter.
    val filteredSessionid2AggInfoRDD: RDD[(String, String)] = filterAggInfo(conditionObj, sessionid2FullActionRDD, sessionAccumlator)
    // Debug output only; triggers a Spark action (an action must run before the
    // accumulator value is read below).
    filteredSessionid2AggInfoRDD.foreach(println)

    // 8. Requirement 1: per-bucket session ratios -> MySQL.
    //    taskUUID is the globally unique primary key for this run.
    val taskUUID: String = UUID.randomUUID().toString
    getSessionRatio(spark, taskUUID, sessionAccumlator.value)

    // 9. Requirement 2: random session extraction, proportional per day/hour.
    sessionRandomExtract(spark, taskUUID, filteredSessionid2AggInfoRDD)

    // 10. Requirement 3: top-10 popular categories among the filtered sessions.
    //     Join back to the raw actions to keep only actions of surviving sessions.
    val filteredActionRDD: RDD[(String, UserVisitAction)] =
      session2ActionRDD.join(filteredSessionid2AggInfoRDD).map {
        case (sessionid, (action, _)) => (sessionid, action)
      }
    val top10CategoryArray: Array[(CatgegorySortKey, String)] =
      top10PopularCategories(spark, taskUUID, filteredActionRDD)

    // 11. Requirement 4: top-10 most active sessions for each top-10 category.
    top10ActiveSession(spark, taskUUID, filteredActionRDD, top10CategoryArray)

    // Release Spark resources (the original never stopped the session).
    spark.stop()
  }

  /**
   * Loads user visit actions from the Hive table `commerce.user_visit_action`
   * whose `date` column falls inside the inclusive [startDate, endDate] range
   * taken from the task-condition JSON.
   *
   * @param spark     active SparkSession with Hive support enabled
   * @param taskParam task-condition JSON holding start/end dates
   * @return RDD of UserVisitAction rows within the date range
   */
  def getActionRDDByDateRange(spark: SparkSession,
                              taskParam: JSONObject): RDD[UserVisitAction] = {
    import spark.implicits._

    val startDate = ParamUtils.getParam(taskParam, Constants.PARAM_START_DATE)
    val endDate = ParamUtils.getParam(taskParam, Constants.PARAM_END_DATE)

    spark.sql("use commerce")
    // NOTE(review): the dates are interpolated straight into the SQL text; they
    // come from the local task config, but parameterizing would be safer.
    val query =
      s"select * from user_visit_action where date>='$startDate' and date<='$endDate'"
    spark.sql(query).as[UserVisitAction].rdd
  }


  /**
   * Aggregates the actions of each session into one "key=value|key=value" info
   * string (session id, distinct search keywords, distinct clicked category ids,
   * visit length in seconds, step length, start time) and enriches it with the
   * owning user's profile (age, professional, city, sex) joined on user_id.
   *
   * @param spark               active SparkSession with Hive support enabled
   * @param sessinoid2actionRDD (session_id, UserVisitAction) pairs
   * @return (session_id, fullAggrInfo) pairs
   */
  def aggregateBySession(spark: SparkSession, sessinoid2actionRDD: RDD[(String, UserVisitAction)]): RDD[(String, String)] = {
    // Group the raw actions by session id.
    val sessionid2ActionsRDD = sessinoid2actionRDD.groupByKey()

    // Fold each session's actions into one partial info string, keyed by user id
    // so the user profile can be joined in below:
    // <userid, partAggrInfo(sessionid, searchKeywords, clickCategoryIds, ...)>
    val userid2PartAggrInfoRDD = sessionid2ActionsRDD.map { case (sessionid, userVisitActions) =>

      // Accumulate the distinct search keywords / clicked category ids as CSV.
      val searchKeywordsBuffer = new StringBuffer("")
      val clickCategoryIdsBuffer = new StringBuffer("")

      // The session's owner; every action of a session carries the same user_id,
      // so the first action seen fixes it (-1 = not yet seen).
      var userid = -1L

      // Earliest and latest action time observed in the session.
      var startTime: Date = null
      var endTime: Date = null
      // Number of actions in the session (step length).
      var stepLength = 0

      // Walk every action belonging to this session.
      userVisitActions.foreach { userVisitAction =>
        if (userid == -1L) {
          userid = userVisitAction.user_id
        }
        val searchKeyword = userVisitAction.search_keyword
        val clickCategoryId = userVisitAction.click_category_id

        // Not every action row carries both fields:
        // only search actions have searchKeyword, and only category-click actions
        // have clickCategoryId, so either field may be absent (null / -1).

        // Append a keyword / category id to the CSV only when it is present and
        // has not been recorded yet (the buffer acts as a small distinct set).
        if (StringUtils.isNotEmpty(searchKeyword)) {
          if (!searchKeywordsBuffer.toString.contains(searchKeyword)) {
            searchKeywordsBuffer.append(searchKeyword + ",")
          }
        }
        if (clickCategoryId != null && clickCategoryId != -1L) {
          if (!clickCategoryIdsBuffer.toString.contains(clickCategoryId.toString)) {
            clickCategoryIdsBuffer.append(clickCategoryId + ",")
          }
        }

        // Track the session's start and end times.
        val actionTime = DateUtils.parseTime(userVisitAction.action_time)

        if (startTime == null) {
          startTime = actionTime
        }
        if (endTime == null) {
          endTime = actionTime
        }

        if (actionTime.before(startTime)) {
          startTime = actionTime
        }
        if (actionTime.after(endTime)) {
          endTime = actionTime
        }

        // Count the session's step length.
        stepLength += 1
      }

      // Strip the trailing comma from the CSV accumulators.
      val searchKeywords = StringUtils.trimComma(searchKeywordsBuffer.toString)
      val clickCategoryIds = StringUtils.trimComma(clickCategoryIdsBuffer.toString)

      // Session visit length in seconds.
      val visitLength = (endTime.getTime - startTime.getTime) / 1000

      // Concatenate everything using the key=value|key=value convention.
      val partAggrInfo: String = Constants.FIELD_SESSION_ID + "=" + sessionid + "|" +
        Constants.FIELD_SEARCH_KEYWORDS + "=" + searchKeywords + "|" +
        Constants.FIELD_CLICK_CATEGORY_IDS + "=" + clickCategoryIds + "|" +
        Constants.FIELD_VISIT_LENGTH + "=" + visitLength + "|" +
        Constants.FIELD_STEP_LENGTH + "=" + stepLength + "|" +
        Constants.FIELD_START_TIME + "=" + DateUtils.formatTime(startTime)
      (userid, partAggrInfo);
    }

    // Load all user profiles and map them to <userid, UserInfo>.
    import spark.implicits._
    val userid2InfoRDD = spark.sql("select * from user_info").as[UserInfo].rdd.map(item => (item.user_id, item))

    // Join the per-session aggregate with its user's profile.
    val userid2FullInfoRDD = userid2PartAggrInfoRDD.join(userid2InfoRDD)

    // Re-key by session id and append the profile fields, producing
    // <sessionid, fullAggrInfo>.
    val sessionid2FullAggrInfoRDD = userid2FullInfoRDD.map { case (uid, (partAggrInfo, userInfo)) =>
      val sessionid = StringUtils.getFieldFromConcatString(partAggrInfo, "\\|", Constants.FIELD_SESSION_ID)

      val fullAggrInfo = partAggrInfo + "|" +
        Constants.FIELD_AGE + "=" + userInfo.age + "|" +
        Constants.FIELD_PROFESSIONAL + "=" + userInfo.professional + "|" +
        Constants.FIELD_CITY + "=" + userInfo.city + "|" +
        Constants.FIELD_SEX + "=" + userInfo.sex

      (sessionid, fullAggrInfo)
    }

    sessionid2FullAggrInfoRDD
  }


  /**
   * Increments the accumulator bucket matching the session's step length
   * (number of actions in the session). A step length of 0 falls into no
   * bucket and leaves the accumulator untouched.
   *
   * @param stepLength        number of actions in the session
   * @param sessionAccumlator accumulator collecting per-bucket counts
   */
  def calculateStepLength(stepLength: Long, sessionAccumlator: SessionAccumlator): Unit = {
    // Resolve the bucket with a guarded match instead of an if/else-if chain.
    val bucket: Option[String] = stepLength match {
      case n if n >= 1 && n <= 3   => Some(Constants.STEP_PERIOD_1_3)
      case n if n >= 4 && n <= 6   => Some(Constants.STEP_PERIOD_4_6)
      case n if n >= 7 && n <= 9   => Some(Constants.STEP_PERIOD_7_9)
      case n if n >= 10 && n <= 30 => Some(Constants.STEP_PERIOD_10_30)
      case n if n > 30 && n <= 60  => Some(Constants.STEP_PERIOD_30_60)
      case n if n > 60             => Some(Constants.STEP_PERIOD_60)
      case _                       => None
    }
    bucket.foreach(sessionAccumlator.add)
  }

  /**
   * Increments the accumulator bucket matching the session's visit length in
   * seconds. A visit length of 0 falls into no bucket.
   *
   * @param visitLength       session duration in seconds
   * @param sessionAccumlator accumulator collecting per-bucket counts
   */
  def calculateVisitLength(visitLength: Long, sessionAccumlator: SessionAccumlator): Unit = {
    if (visitLength >= 1 && visitLength <= 3)
      sessionAccumlator.add(Constants.TIME_PERIOD_1s_3s)
    else if (visitLength >= 4 && visitLength <= 6)
      sessionAccumlator.add(Constants.TIME_PERIOD_4s_6s)
    else if (visitLength >= 7 && visitLength <= 9)
      sessionAccumlator.add(Constants.TIME_PERIOD_7s_9s)
    else if (visitLength >= 10 && visitLength <= 30)
      // BUGFIX: this branch previously added TIME_PERIOD_10m_30m, so the
      // 10s-30s counter (read by getSessionRatio) always stayed 0 while the
      // 10m-30m counter was inflated by 10-30 second sessions.
      sessionAccumlator.add(Constants.TIME_PERIOD_10s_30s)
    else if (visitLength > 30 && visitLength <= 60)
      sessionAccumlator.add(Constants.TIME_PERIOD_30s_60s)
    else if (visitLength > 60 && visitLength <= 180)
      sessionAccumlator.add(Constants.TIME_PERIOD_1m_3m)
    else if (visitLength > 180 && visitLength <= 600)
      sessionAccumlator.add(Constants.TIME_PERIOD_3m_10m)
    else if (visitLength > 600 && visitLength <= 1800)
      sessionAccumlator.add(Constants.TIME_PERIOD_10m_30m)
    else if (visitLength > 1800)
      sessionAccumlator.add(Constants.TIME_PERIOD_30m)
  }


  /**
   * Filters the aggregated session info by the task's conditions (age range,
   * professionals, cities, sex, keywords, category ids). For every session that
   * passes, the accumulator's total session count plus its visit-length and
   * step-length bucket are incremented as a side effect.
   *
   * @param jSONObject        task-condition JSON (absent conditions are null)
   * @param fullAggInfo       (session_id, fullAggrInfo) pairs
   * @param sessionAccumlator accumulator receiving the bucket counts
   * @return only the (session_id, fullAggrInfo) pairs that match all conditions
   */
  def filterAggInfo(jSONObject: JSONObject, fullAggInfo: RDD[(String, String)], sessionAccumlator: SessionAccumlator): RDD[(String, String)] = {
    // Read the individual filter conditions (null when not configured).
    val startAge = ParamUtils.getParam(jSONObject, Constants.PARAM_START_AGE)
    val endAge = ParamUtils.getParam(jSONObject, Constants.PARAM_END_AGE)
    val professionals = ParamUtils.getParam(jSONObject, Constants.PARAM_PROFESSIONALS)
    val cities = ParamUtils.getParam(jSONObject, Constants.PARAM_CITIES)
    val sex = ParamUtils.getParam(jSONObject, Constants.PARAM_SEX)
    val keywords = ParamUtils.getParam(jSONObject, Constants.PARAM_KEYWORDS)
    val categoryIds = ParamUtils.getParam(jSONObject, Constants.PARAM_CATEGORY_IDS)

    // Concatenate the configured conditions as key=value|key=value|
    var filterInfo = (if (startAge != null) Constants.PARAM_START_AGE + "=" + startAge + "|" else "") +
      (if (endAge != null) Constants.PARAM_END_AGE + "=" + endAge + "|" else "") +
      (if (professionals != null) Constants.PARAM_PROFESSIONALS + "=" + professionals + "|" else "") +
      (if (cities != null) Constants.PARAM_CITIES + "=" + cities + "|" else "") +
      (if (sex != null) Constants.PARAM_SEX + "=" + sex + "|" else "") +
      (if (keywords != null) Constants.PARAM_KEYWORDS + "=" + keywords + "|" else "") +
      (if (categoryIds != null) Constants.PARAM_CATEGORY_IDS + "=" + categoryIds + "|" else "")

    // BUGFIX: String.endsWith takes a literal suffix, not a regex. The original
    // checked endsWith("\\|") — the two characters backslash+pipe — which never
    // matched, so the trailing "|" was never stripped. Test the literal "|".
    if (filterInfo.endsWith("|")) {
      filterInfo = filterInfo.substring(0, filterInfo.length - 1)
    }

    // Apply the conditions to every session's fullInfo string.
    val filteredRdd: RDD[(String, String)] = fullAggInfo.filter {
      case (session, fullInfo) =>
        var success = true
        // Evaluate the conditions in order; the first failing one rejects the session.
        if (!ValidUtils.between(fullInfo, Constants.FIELD_AGE, filterInfo, Constants.PARAM_START_AGE, Constants.PARAM_END_AGE)) {
          // Age range (startAge .. endAge).
          success = false
        } else if (!ValidUtils.in(fullInfo, Constants.FIELD_PROFESSIONAL, filterInfo, Constants.PARAM_PROFESSIONALS)) {
          // Professional whitelist.
          success = false
        } else if (!ValidUtils.in(fullInfo, Constants.FIELD_CITY, filterInfo, Constants.PARAM_CITIES)) {
          // City whitelist.
          success = false
        } else if (!ValidUtils.in(fullInfo, Constants.FIELD_SEX, filterInfo, Constants.PARAM_SEX)) {
          // Sex condition.
          success = false
        } else if (!ValidUtils.in(fullInfo, Constants.FIELD_SEARCH_KEYWORDS, filterInfo, Constants.PARAM_KEYWORDS)) {
          // Search-keyword whitelist.
          success = false
        } else if (!ValidUtils.in(fullInfo, Constants.FIELD_CATEGORY_ID, filterInfo, Constants.PARAM_CATEGORY_IDS)) {
          // Clicked-category-id whitelist.
          success = false
        }

        // Session passed every condition: update the accumulator buckets.
        if (success) {
          sessionAccumlator.add(Constants.SESSION_COUNT)
          val visitLength = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_VISIT_LENGTH).toLong
          val stepLength = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_STEP_LENGTH).toLong
          // Bucket the visit length (seconds).
          calculateVisitLength(visitLength, sessionAccumlator)
          // Bucket the step length (action count).
          calculateStepLength(stepLength, sessionAccumlator)
        }
        success
    }
    filteredRdd
  }

  /**
   * Requirement 1: compute, for the filtered sessions, the share of sessions
   * falling into each visit-length and step-length bucket.
   */

  /**
   * Converts the accumulator's raw bucket counts into per-bucket ratios
   * (bucket count / total session count, rounded to 2 decimals) and appends
   * one SessionAggrStat row to the MySQL table `session_aggr_stat`.
   *
   * @param session  active SparkSession
   * @param taskUUID primary key identifying this analysis run
   * @param value    accumulator snapshot: bucket constant -> session count
   */
  def getSessionRatio(session: SparkSession, taskUUID: String, value: mutable.HashMap[String, Int]): Unit = {

    // Total number of filtered sessions; defaults to 1 so the divisions below
    // cannot divide by zero when the accumulator is empty.
    val session_count = value.getOrElse(Constants.SESSION_COUNT, 1).toDouble

    // Raw visit-length bucket counts (0 when a bucket never occurred).
    val visitLength_1s_3s = value.getOrElse(Constants.TIME_PERIOD_1s_3s, 0)
    val visitLength_4s_6s = value.getOrElse(Constants.TIME_PERIOD_4s_6s, 0)
    val visitLength_7s_9s = value.getOrElse(Constants.TIME_PERIOD_7s_9s, 0)
    val visitLength_10s_30s = value.getOrElse(Constants.TIME_PERIOD_10s_30s, 0)
    val visitLength_30s_60s = value.getOrElse(Constants.TIME_PERIOD_30s_60s, 0)
    val visitLength_1m_3m = value.getOrElse(Constants.TIME_PERIOD_1m_3m, 0)
    val visitLength_3m_10m = value.getOrElse(Constants.TIME_PERIOD_3m_10m, 0)
    val visitLength_10m_30m = value.getOrElse(Constants.TIME_PERIOD_10m_30m, 0)
    val visitLength_30m = value.getOrElse(Constants.TIME_PERIOD_30m, 0)

    // Raw step-length bucket counts.
    val stepLength_1_3 = value.getOrElse(Constants.STEP_PERIOD_1_3, 0)
    val stepLength_4_6 = value.getOrElse(Constants.STEP_PERIOD_4_6, 0)
    val stepLength_7_9 = value.getOrElse(Constants.STEP_PERIOD_7_9, 0)
    val stepLength_10_30 = value.getOrElse(Constants.STEP_PERIOD_10_30, 0)
    val stepLength_30_60 = value.getOrElse(Constants.STEP_PERIOD_30_60, 0)
    val stepLength_60 = value.getOrElse(Constants.STEP_PERIOD_60, 0)

    // Each bucket's share of the total session count, rounded to 2 decimals.
    val visit_length_1s_3s_ratio = NumberUtils.formatDouble(visitLength_1s_3s / session_count, 2)
    val visit_length_4s_6s_ratio = NumberUtils.formatDouble(visitLength_4s_6s / session_count, 2)
    val visit_length_7s_9s_ratio = NumberUtils.formatDouble(visitLength_7s_9s / session_count, 2)
    val visit_length_10s_30s_ratio = NumberUtils.formatDouble(visitLength_10s_30s / session_count, 2)
    val visit_length_30s_60s_ratio = NumberUtils.formatDouble(visitLength_30s_60s / session_count, 2)
    val visit_length_1m_3m_ratio = NumberUtils.formatDouble(visitLength_1m_3m / session_count, 2)
    val visit_length_3m_10m_ratio = NumberUtils.formatDouble(visitLength_3m_10m / session_count, 2)
    val visit_length_10m_30m_ratio = NumberUtils.formatDouble(visitLength_10m_30m / session_count, 2)
    val visit_length_30m_ratio = NumberUtils.formatDouble(visitLength_30m / session_count, 2)

    val step_length_1_3_ratio = NumberUtils.formatDouble(stepLength_1_3 / session_count, 2)
    val step_length_4_6_ratio = NumberUtils.formatDouble(stepLength_4_6 / session_count, 2)
    val step_length_7_9_ratio = NumberUtils.formatDouble(stepLength_7_9 / session_count, 2)
    val step_length_10_30_ratio = NumberUtils.formatDouble(stepLength_10_30 / session_count, 2)
    val step_length_30_60_ratio = NumberUtils.formatDouble(stepLength_30_60 / session_count, 2)
    val step_length_60_ratio = NumberUtils.formatDouble(stepLength_60 / session_count, 2)

    // Wrap the statistics in the domain object (constructor args are positional:
    // visit-length ratios first, then step-length ratios — keep the order intact).
    val sessionAggrStat = SessionAggrStat(taskUUID, session_count.toInt, visit_length_1s_3s_ratio, visit_length_4s_6s_ratio, visit_length_7s_9s_ratio, visit_length_10s_30s_ratio,
      visit_length_30s_60s_ratio, visit_length_1m_3m_ratio, visit_length_3m_10m_ratio, visit_length_10m_30m_ratio, visit_length_30m_ratio, step_length_1_3_ratio,
      step_length_4_6_ratio, step_length_7_9_ratio, step_length_10_30_ratio, step_length_30_60_ratio, step_length_60_ratio)

    // Append the single-row result to the MySQL table session_aggr_stat via JDBC.
    import session.implicits._
    val sessionRatioRDD: RDD[SessionAggrStat] = session.sparkContext.makeRDD(Array(sessionAggrStat))
    sessionRatioRDD.toDF().write.format("jdbc")
      .option("url", ConfigurationManager.config.getString(Constants.JDBC_URL))
      .option("user", ConfigurationManager.config.getString(Constants.JDBC_USER))
      .option("password", ConfigurationManager.config.getString(Constants.JDBC_PASSWORD))
      .option("dbtable", "session_aggr_stat")
      .mode(SaveMode.Append)
      .save()
  }

  /**
   * Requirement 2: randomly extract sessions, proportional to the per-day and
   * per-hour session volume.
   */
  /**
   * Randomly extracts roughly 100 sessions, distributed proportionally over
   * each day and each hour of the filtered data, and writes them to the MySQL
   * table `session_random_extract`.
   *
   * @param session               active SparkSession
   * @param taskUUID              primary key identifying this analysis run
   * @param sessionid2AggrInfoRDD filtered (session_id, fullAggrInfo) pairs
   */
  def sessionRandomExtract(session: SparkSession,
                           taskUUID: String,
                           sessionid2AggrInfoRDD: RDD[(String, String)]): Unit = {
    // Step 1: re-key every session by its start hour, yielding
    // <yyyy-MM-dd_HH, aggrInfo>.
    val time2sessionidRDD: RDD[(String, String)] = sessionid2AggrInfoRDD.map {
      case (sid, fullInfo) => {
        val startTime = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_START_TIME)
        // Reduce the start time to hour granularity: yyyy-MM-dd_HH.
        val dateHour = DateUtils.getDateHour(startTime)
        (dateHour, fullInfo)
      }
    }

    // Count sessions per hour: countMap<yyyy-MM-dd_HH, count>.
    // countByKey() collects the counts to the driver.
    val countMap: collection.Map[String, Long] = time2sessionidRDD.countByKey()

    // Step 2: regroup the flat <yyyy-MM-dd_HH, count> map into a nested
    // <yyyy-MM-dd, <HH, count>> map so indices can be drawn per day.
    val DateHourCountMap = new mutable.HashMap[String, mutable.HashMap[String, Long]]()

    for ((dateHour, count) <- countMap) {
      val date = dateHour.split("_")(0)
      val hour = dateHour.split("_")(1)
      // Pattern match replaces an explicit contains-check.
      DateHourCountMap.get(date) match {
        // First hour seen for this date: create the inner map, then record it.
        case None => DateHourCountMap(date) = new mutable.HashMap[String, Long]()
          DateHourCountMap(date) += (hour -> count)
        // Date already present: just record this hour's count.
        case Some(map) => DateHourCountMap(date) += (hour -> count)
      }
    }

    // Split the 100-session budget evenly across the days.
    // NOTE(review): integer division, and DateHourCountMap.size == 0 (no data)
    // would throw ArithmeticException here — confirm upstream guarantees data.
    val extractPerDay = 100 / DateHourCountMap.size

    // Step 3 (driver side): for every day/hour, draw the random row indices to
    // extract, filling Map<date, <hour, ListBuffer(3,5,20,102,...)>>.
    val dateHourExtractIndexListMap = new mutable.HashMap[String, mutable.HashMap[String, ListBuffer[Int]]]()

    // NOTE(review): the loop variable `countMap` below shadows the outer
    // `countMap` from countByKey — here it is the per-day <hour, count> map.
    for ((date, countMap) <- DateHourCountMap) {
      val dateSessionCount = countMap.values.sum
      dateHourExtractIndexListMap.get(date) match {
        case None => dateHourExtractIndexListMap(date) = new mutable.HashMap[String, ListBuffer[Int]]()
          generateRandomIndexList(extractPerDay, dateSessionCount, countMap, dateHourExtractIndexListMap(date))
        case Some(map) =>
          generateRandomIndexList(extractPerDay, dateSessionCount, countMap, dateHourExtractIndexListMap(date))
      }
    }

    // All extraction indices are now computed.
    // Broadcast the index map to the executors.
    val dateExtractIndexListMapBd = session.sparkContext.broadcast(dateHourExtractIndexListMap)

    // Regroup the sessions per hour: <yyyy-MM-dd_HH, Iterable(aggrInfo)>.
    val time2sessionsRDD: RDD[(String, Iterable[String])] = time2sessionidRDD.groupByKey()

    // Step 4: walk every hour's sessions and keep only those whose position in
    // the iterable matches one of the pre-drawn random indices.
    val sessionRandomExtract: RDD[SessionRandomExtract] = time2sessionsRDD.flatMap {
      case (dateHour, iterableFullInfo) => {
        val date = dateHour.split("_")(0)
        val hour = dateHour.split("_")(1)
        // NOTE(review): Option#get without a presence check — relies on every
        // date/hour of the RDD also existing in the broadcast map (true by
        // construction, since both derive from time2sessionidRDD).
        val extractList = dateExtractIndexListMapBd.value.get(date).get(hour)
        val extractSessionArrayBuffer = new ArrayBuffer[SessionRandomExtract]()
        var index = 0
        for (fullInfo <- iterableFullInfo) {
          // Keep this session iff its index was drawn for this hour.
          if (extractList.contains(index)) {
            val sessionId = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_SESSION_ID)
            val startTime = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_START_TIME)
            val searchKeywords = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_SEARCH_KEYWORDS)
            val clickCategories = StringUtils.getFieldFromConcatString(fullInfo, "\\|", Constants.FIELD_CLICK_CATEGORY_IDS)
            val sessionExtract = SessionRandomExtract(taskUUID, sessionId, startTime, searchKeywords, clickCategories)
            extractSessionArrayBuffer += sessionExtract
          }
          index += 1
        }
        extractSessionArrayBuffer
      }
    }

    /* Persist the extracted sessions to MySQL. */
    // Implicits enable the RDD -> DataFrame conversion below.
    import session.implicits._
    // Convert to a DataFrame and append via JDBC.
    sessionRandomExtract.toDF().write.format("jdbc")
      .option("url", ConfigurationManager.config.getString(Constants.JDBC_URL))
      .option("user", ConfigurationManager.config.getString(Constants.JDBC_USER))
      .option("password", ConfigurationManager.config.getString(Constants.JDBC_PASSWORD))
      .option("dbtable", "session_random_extract")
      .mode(SaveMode.Append)
      .save()
  }

  /**
   * For one day, fills `hourListMap` with a list of distinct random row indices
   * to extract from each hour's sessions. Each hour receives a share of
   * `extractPerDay` proportional to its session count, capped at that count.
   *
   * @param extractPerDay    total number of sessions to extract for the day
   * @param dateSessionCount total session count of the day
   * @param hourCountMap     hour -> session count for that hour
   * @param hourListMap      output map: hour -> chosen random indices (appended to)
   */
  def generateRandomIndexList(extractPerDay: Int,
                              dateSessionCount: Long,
                              hourCountMap: mutable.HashMap[String, Long],
                              hourListMap: mutable.HashMap[String, ListBuffer[Int]]): Unit = {
    val random = new Random()
    for ((hour, count) <- hourCountMap) {
      // Sessions to draw from this hour, proportional to its share of the day.
      var hourExrCount = ((count / dateSessionCount.toDouble) * extractPerDay).toInt
      // Never draw more indices than the hour has sessions (also makes the
      // rejection loop below terminate).
      if (hourExrCount > count) {
        hourExrCount = count.toInt
      }

      // FIX(dedup): the original matched on hourListMap.get(hour) and repeated
      // the identical sampling loop in both the None and Some branches;
      // getOrElseUpdate keeps a single copy with the same behavior.
      val indexList = hourListMap.getOrElseUpdate(hour, new ListBuffer[Int])
      for (_ <- 0 until hourExrCount) {
        // Rejection-sample until we hit an index not chosen yet, so the
        // collected indices stay distinct.
        var index = random.nextInt(count.toInt)
        while (indexList.contains(index)) {
          index = random.nextInt(count.toInt)
        }
        indexList.append(index)
      }
    }
  }


  /**
   * Requirement 3: compute the top-10 popular categories by click, order and
   * payment counts.
   */

  /**
   * Computes the top-10 categories among the filtered sessions, ranked by
   * click count, then order count, then payment count (via the custom sort
   * key), writes them to the MySQL table `top10_category`, and returns the
   * sorted (sortKey, countInfo) array for requirement 4.
   *
   * @param session             active SparkSession
   * @param taskUUID            primary key identifying this analysis run
   * @param sessionid2detailRDD filtered (session_id, UserVisitAction) pairs
   * @return the top-10 (CatgegorySortKey, countInfo) pairs, descending
   */
  def top10PopularCategories(session: SparkSession, taskUUID: String, sessionid2detailRDD: RDD[(String, UserVisitAction)]) = {
    // Step 1: collect every category id touched by a click, order or payment.

    // Emitted as (cid, cid) pairs so they can feed the joins below.
    // NOTE(review): the else-if chain means an action contributes only its
    // click category when both click and order/pay fields are set — assumed
    // impossible per the data model (one behavior per action row); confirm.
    val categoryidRDD: RDD[(Long, Long)] = sessionid2detailRDD.flatMap {
      case (sid, action) => {
        val categoryBuffer = new ArrayBuffer[(Long, Long)]()

        if (action.click_category_id != -1) { // category clicked in this action (-1 = none)
          categoryBuffer += ((action.click_category_id, action.click_category_id))
        } else if (action.order_category_ids != null) { // CSV of ordered category ids
          for (orderCid <- action.order_category_ids.split(",")) {
            categoryBuffer += ((orderCid.toLong, orderCid.toLong))
          }
        } else if (action.pay_category_ids != null) { // CSV of paid category ids
          for (payCid <- action.pay_category_ids.split(",")) {
            categoryBuffer += ((payCid.toLong, payCid.toLong))
          }
        }
        categoryBuffer
      }
    }

    // Deduplicate: the distinct set of all categories that were clicked,
    // ordered or paid for.
    val distinctCategoryIdRdd = categoryidRDD.distinct()

    // Step 2: count clicks, orders and payments per category.

    // Clicks per category.
    val cidClickCountRdd = getClickCount(sessionid2detailRDD)
    // Orders per category.
    val cidOrederCountRdd = getOrdeCount(sessionid2detailRDD)
    // Payments per category.
    val cidPayCountRdd = getPayCount(sessionid2detailRDD)

    // Step 3: left-join the distinct category set with each count so every
    // category keeps a row even when one of the counts is missing (defaults 0).
    val cidFullCountRDD = getFullCount(distinctCategoryIdRdd, cidClickCountRdd, cidOrederCountRdd, cidPayCountRdd)

    // Steps 4-5: build the secondary-sort key (clickCount, orderCount,
    // payCount) for every category — CatgegorySortKey orders by click count
    // first, then order count, then payment count.
    val sortKeyFullCountRDD: RDD[(CatgegorySortKey, String)] = cidFullCountRDD.map {
      case (cid, countInfo) =>
        val clickCount = StringUtils.getFieldFromConcatString(countInfo, "\\|", Constants.FIELD_CLICK_COUNT).toLong
        val orderCount = StringUtils.getFieldFromConcatString(countInfo, "\\|", Constants.FIELD_ORDER_COUNT).toLong
        val payCount = StringUtils.getFieldFromConcatString(countInfo, "\\|", Constants.FIELD_PAY_COUNT).toLong

        val sortKey = CatgegorySortKey(clickCount, orderCount, payCount)
        (sortKey, countInfo)
    }

    // Step 6: sort descending, take the top 10, and persist them to MySQL.
    val top10CategoryArray: Array[(CatgegorySortKey, String)] = sortKeyFullCountRDD.sortByKey(false).take(10)
    val top10CategoryRdd = session.sparkContext.makeRDD(top10CategoryArray).map {
      case (sortKey, countInfo) =>
        val cid = StringUtils.getFieldFromConcatString(countInfo, "\\|", Constants.FIELD_CATEGORY_ID).toLong
        val clickCount = sortKey.clickCount
        val orderCount = sortKey.orderCount
        val payCount = sortKey.payCount

        Top10Category(taskUUID, cid, clickCount, orderCount, payCount)
    }

    // Convert to a DataFrame and append to MySQL table top10_category via JDBC.
    import session.implicits._
    top10CategoryRdd.toDF().write
      .format("jdbc")
      .option("url", ConfigurationManager.config.getString(Constants.JDBC_URL))
      .option("user", ConfigurationManager.config.getString(Constants.JDBC_USER))
      .option("password", ConfigurationManager.config.getString(Constants.JDBC_PASSWORD))
      .option("dbtable", "top10_category")
      .mode(SaveMode.Append)
      .save
    top10CategoryArray
  }

  /**
   * Left-joins every distinct category id with its click, order and payment
   * counts and folds them into one "k=v|k=v" info string per category.
   * A count missing from a join defaults to 0, so every category in
   * `distinctCategoryIdRdd` keeps a complete row.
   *
   * @return (category_id, countInfo) pairs with categoryid, clickCount,
   *         orderCount and payCount fields
   */
  def getFullCount(distinctCategoryIdRdd: RDD[(Long, Long)],
                   cidClickCountRdd: RDD[(Long, Long)],
                   cidOrederCountRdd: RDD[(Long, Long)],
                   cidPayCountRdd: RDD[(Long, Long)]): RDD[(Long, String)] = {
    // Seed the info string with the category id and its click count.
    val withClicks = distinctCategoryIdRdd.leftOuterJoin(cidClickCountRdd).map {
      case (cid, (_, clickOpt)) =>
        val info = Constants.FIELD_CATEGORY_ID + "=" + cid + "|" +
          Constants.FIELD_CLICK_COUNT + "=" + clickOpt.getOrElse(0L)
        (cid, info)
    }

    // Append the order count.
    val withOrders = withClicks.leftOuterJoin(cidOrederCountRdd).map {
      case (cid, (info, orderOpt)) =>
        (cid, info + "|" + Constants.FIELD_ORDER_COUNT + "=" + orderOpt.getOrElse(0L))
    }

    // Append the payment count, completing the info string.
    withOrders.leftOuterJoin(cidPayCountRdd).map {
      case (cid, (info, payOpt)) =>
        (cid, info + "|" + Constants.FIELD_PAY_COUNT + "=" + payOpt.getOrElse(0L))
    }
  }

  /**
   * Counts clicks per category id across the filtered actions.
   *
   * BUGFIX: `click_category_id` is used as a plain Long throughout this file
   * (compared against -1 at L539-style call sites), so the original
   * `!= null` filter was always true and "no click" rows were counted under
   * the sentinel category -1. Filter on the -1 sentinel instead, consistent
   * with how click actions are detected elsewhere in this file.
   *
   * @param FilterActionRdd filtered (session_id, UserVisitAction) pairs
   * @return (category_id, clickCount) pairs
   */
  def getClickCount(FilterActionRdd: RDD[(String, UserVisitAction)]): RDD[(Long, Long)] = {
    // Keep only actions that actually clicked a category.
    val clickFilterRdd = FilterActionRdd.filter(item => item._2.click_category_id != -1L)
    // One (cid, 1) per click, then sum per category.
    val clickFilterMapRdd: RDD[(Long, Long)] = clickFilterRdd.map {
      case (sid, action) => (action.click_category_id, 1L)
    }
    clickFilterMapRdd.reduceByKey(_ + _)
  }

  /**
   * Counts orders per category id. An action's `order_category_ids` field
   * holds a comma-separated list of category ids; each listed id counts as
   * one order.
   *
   * @param FilterActionRdd filtered (session_id, UserVisitAction) pairs
   * @return (category_id, orderCount) pairs
   */
  def getOrdeCount(FilterActionRdd: RDD[(String, UserVisitAction)]): RDD[(Long, Long)] = {
    FilterActionRdd
      .filter { case (_, action) => action.order_category_ids != null }
      .flatMap { case (_, action) =>
        action.order_category_ids.split(",").map(cid => (cid.toLong, 1L))
      }
      .reduceByKey(_ + _)
  }

  /**
   * Counts payments per category id. An action's `pay_category_ids` field
   * holds a comma-separated list of category ids; each listed id counts as
   * one payment.
   *
   * @param FilterActionRdd filtered (session_id, UserVisitAction) pairs
   * @return (category_id, payCount) pairs
   */
  def getPayCount(FilterActionRdd: RDD[(String, UserVisitAction)]): RDD[(Long, Long)] = {
    FilterActionRdd
      .filter { case (_, action) => action.pay_category_ids != null }
      .flatMap { case (_, action) =>
        action.pay_category_ids.split(",").map(cid => (cid.toLong, 1L))
      }
      .reduceByKey(_ + _)
  }





  /**
   * Requirement 4: for each top-10 category, find the 10 sessions that clicked
   * it most often.
   */

  /**
   * For each of the top-10 categories, finds the 10 sessions with the most
   * clicks on that category and appends them to the MySQL table
   * `top10_session`.
   *
   * @param session             active SparkSession
   * @param taskUUID            primary key identifying this analysis run
   * @param sessionid2ActionRDD filtered (session_id, UserVisitAction) pairs
   * @param top10CategoryArray  output of top10PopularCategories — the countInfo
   *                            string carries the category id
   */
  def top10ActiveSession(session: SparkSession,
                         taskUUID: String,
                         sessionid2ActionRDD: RDD[(String, UserVisitAction)],
                         top10CategoryArray: Array[(CatgegorySortKey, String)]): Unit = {
    // Step 1: extract the ids of the top-10 categories.

    // Parsed out of each countInfo string ("categoryid=..." field).
    val cidArray: Array[Long] = top10CategoryArray.map {
      case (sortKey, countInfo) =>
        val cid = StringUtils.getFieldFromConcatString(countInfo, "\\|", Constants.FIELD_CATEGORY_ID).toLong
        cid
    }

    // Step 2: count how often each session clicked each top-10 category.

    // Keep only the click actions that target one of the top-10 categories.
    val sessionIdActionRDD: RDD[(String, UserVisitAction)] = sessionid2ActionRDD.filter {
      case (sessionId, action) =>
        cidArray.contains(action.click_category_id)
    }
    val sessionIdGroupRdd: RDD[(String, Iterable[UserVisitAction])] = sessionIdActionRDD.groupByKey()
    // Per session, tally clicks per category, then emit one
    // (cid, "sessionId=count") pair per category the session clicked.
    val cidSessionCountRDD: RDD[(Long, String)] = sessionIdGroupRdd.flatMap {
      case (sessionId, userVisitActions) =>
        // category id -> click count within this session
        val categoryCountMap = new mutable.HashMap[Long, Long]()
        // userVisitActions holds all (already filtered) actions of one session;
        // each one is a click on some top-10 category.
        for (userVisitAction <- userVisitActions) {
          val cid = userVisitAction.click_category_id
          // Initialize the counter the first time this category appears.
          if (!categoryCountMap.contains(cid)) {
            categoryCountMap += (cid -> 0)
          }
          categoryCountMap.update(cid, categoryCountMap(cid) + 1)

        }
        // Emit one "sessionId=count" record per category, keyed by category id.
        for ((cid, count) <- categoryCountMap)
          yield (cid, sessionId + "=" + count)

    }

    // Step 3: grouped TopN — the 10 most active sessions of each category.

    // Group the "sessionId=count" records by category.
    val cidGroupRDD: RDD[(Long, Iterable[String])] = cidSessionCountRDD.groupByKey()

    // Sort each category's sessions by click count (descending), keep 10, and
    // convert them to domain objects.
    val top10SessionsRDD = cidGroupRDD.flatMap {
      case (cid, iterableSessionCount) =>
        // Descending by the count after '=' in "sessionId=count".
        val sortList: List[String] = iterableSessionCount.toList.sortWith((item1, item2) => {
          item1.split("=")(1).toLong > item2.split("=")(1).toLong
        }).take(10)
        // Split "sessionId=count" back into its parts.
        val top10Sessions = sortList.map {
          case item =>
            val sessionId = item.split("=")(0)
            val count = item.split("=")(1).toLong
            Top10Session(taskUUID, cid, sessionId, count)
        }
        top10Sessions
    }

    // Append the result rows to the MySQL table top10_session via JDBC.
    import session.implicits._
    top10SessionsRDD.toDF().write
      .format("jdbc")
      .option("url", ConfigurationManager.config.getString(Constants.JDBC_URL))
      .option("user", ConfigurationManager.config.getString(Constants.JDBC_USER))
      .option("password", ConfigurationManager.config.getString(Constants.JDBC_PASSWORD))
      .option("dbtable", "top10_session")
      .mode(SaveMode.Append)
      .save()
  }


}
