package com.sunzm.spark.core

import com.alibaba.fastjson.JSON
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.control.NonFatal

/**
 *
 * Spark Core示例程序
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-05-31 18:46
 */
object SparkCoreDemo {

  /**
   * Entry point: reads a message log, keeps messages with `action == 5`,
   * and counts messages per session — first via `groupBy` + `map`, then
   * via the more efficient `reduceByKey` — printing both results.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("RDD示例")
      .setMaster("local[*]")
    val sc = new SparkContext(conf)

    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.log")

    // Parse each line exactly once: keep only action == 5 messages and emit
    // (sessionId, 1). The original filter + map pair parsed every surviving
    // line twice; flatMap over Option does the same work in one pass.
    val mapRDD: RDD[(String, Int)] = dataRDD.flatMap { line =>
      try {
        val jsonObject = JSON.parseObject(line)
        if (jsonObject.getIntValue("action") == 5) {
          Some((jsonObject.getString("sessionId"), 1))
        } else {
          None
        }
      } catch {
        // NonFatal (not Throwable): fatal errors such as OutOfMemoryError or
        // InterruptedException must still propagate instead of being swallowed.
        case NonFatal(_) =>
          println(s"异常数据: ${line}")
          None
      }
    }

    // mapRDD feeds two independent downstream actions; cache it so the log
    // file is not re-read and re-parsed for the second computation.
    mapRDD.cache()

    // Group by sessionId, then count the messages in each group.
    // groupBy shuffles every (sessionId, 1) record across the network before
    // counting, which is why reduceByKey below is preferred.
    val groupRDD: RDD[(String, Int)] = mapRDD.groupBy(_._1)
      .map { case (sessionId, msgs) => (sessionId, msgs.size) }

    // Print the groupBy result.
    groupRDD.foreachPartition { partition =>
      partition.foreach { case (sessionId, sessionCount) =>
        println(s"groupBy: ${sessionId} -> ${sessionCount}")
      }
    }

    // Same result via reduceByKey, which performs map-side (local) combining
    // before the shuffle and therefore moves far less data than groupBy.
    val reduceRDD: RDD[(String, Int)] = mapRDD.reduceByKey(_ + _)

    // Print the reduceByKey result.
    reduceRDD.foreachPartition { partition =>
      partition.foreach { case (sessionId, sessionCount) =>
        println(s"reduceByKey: ${sessionId} -> ${sessionCount}")
      }
    }

    sc.stop()
  }
}
