package com.sunzm.spark.core.rdd.transformation

import java.io.File

import com.alibaba.fastjson.JSON
import org.apache.commons.io.FileUtils
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.{HashPartitioner, Partitioner, SparkContext}

import scala.util.control.NonFatal

/**
 * Grouped top-N with plain RDD operators (使用RDD算子求分组TopN): for each
 * company, rank its chat sessions by message count and keep the top ones.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-06-30 20:10
 */
object RddGroupTopNDemo {

  /** Number of top sessions (by message count) to keep per company. */
  private val TopN = 10

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 6)
      .config("spark.sql.shuffle.partitions", 6)
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext

    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.dat")

    // Count messages per (companyId, sessionId).
    // Each line is parsed exactly once (the previous filter+map parsed every
    // line twice); malformed JSON lines are logged and skipped rather than
    // failing the whole job.
    val companyIdSessionMsgCountRDD: RDD[((String, String), Int)] = dataRDD.flatMap { line =>
      try {
        val json = JSON.parseObject(line)
        val companyId: String = json.getString("companyId")
        val sessionId: String = json.getString("sessionId")
        val action: Int = json.getIntValue("action")

        // action == 5 marks a message event; drop records missing either id.
        if (action == 5 && StringUtils.isNotBlank(companyId) && StringUtils.isNotBlank(sessionId)) {
          Some(((companyId, sessionId), 1))
        } else {
          None
        }
      } catch {
        // NonFatal only: fatal errors (OOM, interruption) must still propagate.
        case NonFatal(e) =>
          e.printStackTrace()
          None
      }
    }.reduceByKey(_ + _)

    // Re-key as (SessionMsgCount, 1) so the shuffle can sort by the key:
    // SessionMsgCount is Ordered by (companyId asc, msgCount desc).
    val sessionMsgCountRDD: RDD[(SessionMsgCount, Int)] = companyIdSessionMsgCountRDD.map {
      case ((companyId, sessionId), msgCount) => (SessionMsgCount(companyId, sessionId, msgCount), 1)
    }

    val parallelism = sc.defaultParallelism

    // repartitionAndSortWithinPartitions requires an Ordering on the key.
    // MyHashPartitioner routes all of a company's records to one partition,
    // and the key ordering keeps each company's records contiguous (and
    // sorted by descending message count) inside that partition.
    val resultRDD: RDD[(String, String, Int, Int)] = sessionMsgCountRDD
      .repartitionAndSortWithinPartitions(new MyHashPartitioner(parallelism))
      .mapPartitions { iter =>
        // A hash partition may hold several companies, so the rank counter
        // must reset whenever companyId changes; a single partition-wide
        // counter would compute top-N per PARTITION, not per company.
        var currentCompanyId: String = null
        var rank = 0
        iter.map { case (smc, _) =>
          if (smc.companyId != currentCompanyId) {
            currentCompanyId = smc.companyId
            rank = 0
          }
          rank += 1
          (smc.companyId, smc.sessionId, smc.msgCount, rank)
        }
      }
      .filter(_._4 <= TopN)

    // saveAsTextFile writes a directory; clear any previous run's output first.
    val writeFileName = "data/spark/sql/topN.txt"
    FileUtils.deleteQuietly(new File(writeFileName))

    resultRDD.saveAsTextFile(writeFileName)

    spark.stop()
  }

  /**
   * Partitioner that hashes on companyId only, so every session of a given
   * company lands in the same partition.
   *
   * @param partitions number of partitions
   */
  private class MyHashPartitioner(partitions: Int) extends Partitioner {
    def numPartitions: Int = partitions

    def getPartition(key: Any): Int = key match {
      case null => 0
      // Hash only the companyId so all of a company's sessions co-locate.
      case smc: SessionMsgCount => nonNegativeMod(smc.companyId.hashCode, numPartitions)
      // Fallback for any other key type (the previous unconditional cast
      // would have thrown ClassCastException here).
      case other => nonNegativeMod(other.hashCode, numPartitions)
    }

    // Must compare against MyHashPartitioner itself: matching the unrelated
    // HashPartitioner meant two equivalent MyHashPartitioners were never
    // equal, defeating Spark's "same partitioner => skip shuffle" check.
    override def equals(other: Any): Boolean = other match {
      case p: MyHashPartitioner =>
        p.numPartitions == numPartitions
      case _ =>
        false
    }

    override def hashCode: Int = numPartitions
  }

  /**
   * `x % mod` adjusted to always fall in `[0, mod)`, even for negative `x`
   * (the JVM's `%` keeps the sign of the dividend).
   */
  private def nonNegativeMod(x: Int, mod: Int): Int = {
    val rawMod = x % mod
    rawMod + (if (rawMod < 0) mod else 0)
  }

  /**
   * Shuffle key: one session's message count for one company.
   *
   * Ordered by companyId ascending, then msgCount descending, so that within
   * a sorted partition each company's sessions are contiguous and best-first
   * — the invariant the per-company rank counter in `main` relies on.
   */
  private[transformation] case class SessionMsgCount(companyId: String, sessionId: String, msgCount: Int) extends Ordered[SessionMsgCount] {
    override def compare(that: SessionMsgCount): Int = {
      val byCompany = this.companyId.compareTo(that.companyId)
      if (byCompany != 0) byCompany
      // Descending by count; Integer.compare avoids the overflow risk of
      // `that.msgCount - this.msgCount`.
      else Integer.compare(that.msgCount, this.msgCount)
    }
  }

}
