package com.sunzm.spark.sql.exercise

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.DateFormatUtils
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object DataFrameExercise {

  /**
   * Entry point: reads newline-delimited JSON message logs from
   * `data/spark/sql/msgLog.dat`, cleans each record, and counts sessions
   * per company three ways: typed `groupByKey`, untyped `groupBy`, and
   * Spark SQL over a temp view.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      // Strip the trailing '$' Scala appends to companion-object class names.
      .appName(s"${this.getClass.getSimpleName.stripSuffix("$")}")
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()

    import spark.implicits._

    // Raw input: one JSON object per line.
    val jSONDF: Dataset[String] = spark.read.textFile("data/spark/sql/msgLog.dat")

    // Data cleansing: drop blank lines, parse the JSON, backfill a missing
    // companyId, and normalize the epoch-millis timestamp to a string.
    val mapDS: Dataset[(String, String, String)] = jSONDF
      .filter(StringUtils.isNotBlank(_))
      .map { line =>
        val jSONObject: JSONObject = JSON.parseObject(line)

        val rawCompanyId = jSONObject.getString("companyId")
        val sessionId = jSONObject.getString("sessionId")
        val datetime = jSONObject.getLongValue("datetime")

        // When companyId is blank, derive it from the character at index 1
        // (0-based) of the trimmed sessionId.
        val companyId =
          if (StringUtils.isNotBlank(rawCompanyId)) rawCompanyId
          else {
            val trimStr = StringUtils.trim(sessionId)
            StringUtils.substring(trimStr, 1, 2) match {
              case "1" => "cmp001"
              case "2" => "cmp002"
              case "3" => "cmp003"
              case _   => "---" // sentinel for an unrecognized company
            }
          }

        val datetimeStr = DateFormatUtils.format(datetime, "yyyy-MM-dd HH:mm:ss")
        (companyId, sessionId, datetimeStr)
      }
      .toDF("companyId", "sessionId", "datetime")
      .as[(String, String, String)]

    mapDS.printSchema()

    // Approach 1 — typed Dataset API: group by companyId, count rows per group.
    println("groupByKey----------")
    mapDS.groupByKey {
      case (companyId, _, _) => companyId
    }.count().show()

    // Approach 2 — untyped DataFrame API: same per-company row count.
    println("groupBy----------")
    val dsDF: Dataset[Row] = mapDS.groupBy("companyId").count()
    dsDF.show()

    // Approach 3 — Spark SQL over a temp view: distinct session count per company.
    mapDS.createOrReplaceTempView("v_mgs")

    println("SQL----------")
    val sqlResultDF = spark.sql(
      """
        |SELECT companyId, COUNT(DISTINCT sessionId) AS sessionCount FROM
        | v_mgs
        | GROUP BY companyId
        |""".stripMargin)

    // Convert the untyped result to a typed Dataset before iterating, so rows
    // are decoded once per partition instead of via per-row getAs lookups.
    sqlResultDF.as[(String, Long)].foreachPartition((p: Iterator[(String, Long)]) => {
      p.foreach {
        case (companyId, sessionCount) =>
          println(s"公司Id：${companyId}, 会话数量: ${sessionCount}")
      }
    })

    spark.stop()
  }

}
