package com.sunzm.spark.sql.exercise

import com.alibaba.fastjson.JSON
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

import scala.util.control.NonFatal

/**
 * Counts messages per session from a JSON log, keeping only records whose
 * "action" field equals [[DataFrameExercise2.TargetAction]]. The aggregation
 * is computed twice for comparison: once with the typed Dataset API
 * (`groupByKey` + `count`) and once with Spark SQL over a temp view.
 *
 * Each input line is expected to be a JSON object with at least a
 * "sessionId" (string) and an "action" (int) field — TODO confirm against
 * the producer of msgLog.log. Malformed lines are logged and skipped.
 */
object DataFrameExercise2 {

  /** Action value identifying the events we want to count. */
  private val TargetAction = 5

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName(s"${this.getClass.getSimpleName.stripSuffix("$")}")
      .master("local[*]")
      .config("spark.default.parallelism", 2)
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    import spark.implicits._

    // Manual parsing of the raw JSON lines.
    val dataDS: Dataset[String] = spark.read.textFile("data/spark/sql/msgLog.log")

    // Parse each line exactly once: the original filter+map pair parsed every
    // surviving line twice. flatMap over an Option keeps only well-formed
    // records whose action matches TargetAction.
    val mapDS: Dataset[(String, Int)] = dataDS.flatMap { line =>
      try {
        val obj = JSON.parseObject(line)
        val action: Int = obj.getIntValue("action")
        if (action == TargetAction) Some((obj.getString("sessionId"), action))
        else None
      } catch {
        // NonFatal instead of Throwable: let OutOfMemoryError, thread
        // interrupts, etc. propagate instead of being swallowed here.
        case NonFatal(e) =>
          e.printStackTrace()
          None
      }
    }.toDF("sessionId", "action")
      .as[(String, Int)]

    mapDS.show(10)

    // --- Typed Dataset API: event count per sessionId ---
    val dsResultDS: Dataset[(String, Long)] = mapDS
      .groupByKey { case (sessionId, _) => sessionId }
      .count()

    println("DSL.......")
    dsResultDS.foreachPartition((p: Iterator[(String, Long)]) => {
      p.foreach {
        case (sessionId, sessionCount) =>
          println(s"DSL: ${sessionId} -> ${sessionCount}")
      }
    })

    // --- Spark SQL: same aggregation expressed as a query ---
    // createOrReplaceTempView is idempotent; createTempView would throw
    // if the view already existed in this session (e.g. on re-run).
    mapDS.createOrReplaceTempView("v_msgLog")
    val sqlResultDF: DataFrame = spark.sql(
      """
        |SELECT sessionId, COUNT(*) AS sessionCount FROM v_msgLog
        | GROUP BY sessionId
        |""".stripMargin)

    println("SQL.......")
    sqlResultDF.foreachPartition((p: Iterator[Row]) => {
      p.foreach {
        case Row(sessionId, sessionCount) =>
          println(s"SQL: ${sessionId} -> ${sessionCount}")
      }
    })

    spark.stop()
  }
}
