package com.sunzm.spark.sql

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.DateFormatUtils
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * DataFrame operation examples: map/filter operators and column-based filtering.
 */
object DataFrameDemo {
  def main(args: Array[String]): Unit = {
    // Build a local SparkSession; the app name is derived from this object's
    // class name with the trailing '$' of the Scala object dropped.
    val session: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()

    // map/filter operator example (disabled):
    //mapAndFilterDemo(session)
    columnDemo(session)

    session.close()
  }

  /**
   * Demonstrates the Dataset map and filter operators by parsing a JSON
   * message log line-by-line and normalising the companyId column.
   *
   * @param spark the active SparkSession used to read the input files
   */
  private def mapAndFilterDemo(spark: SparkSession): Unit = {
    // Implicit encoders/conversions needed by .as[String], .map and .toDF.
    import spark.implicits._

    // Read the JSON file directly into a DataFrame (schema inferred by Spark).
    val jSONDF: DataFrame = spark.read.json("data/spark/sql/msgLog.dat")
    //jSONDF.printSchema()

    // Read the same file as raw text: a single string column named "value".
    val textDF: DataFrame = spark.read.text("data/spark/sql/msgLog.dat")
    textDF.printSchema()

    // filter keeps rows for which the predicate returns true (like SQL WHERE);
    // map transforms one input row into one output row (format conversion).
    val mapDF = textDF.as[String]
      .filter(line => StringUtils.isNotBlank(line))
      .map { line =>
        // Parse each raw line as a JSON object.
        val jSONObject: JSONObject = JSON.parseObject(line)

        val rawCompanyId = jSONObject.getString("companyId")
        val sessionId = jSONObject.getString("sessionId")
        val datetime = jSONObject.getLongValue("datetime")

        // When companyId is missing, derive it from the character at index 1
        // (0-based) of the trimmed sessionId:
        //   "1" -> cmp001, "2" -> cmp002, "3" -> cmp003, anything else -> "---".
        // NOTE(review): the original comment said "first character" while the
        // code reads index 1; index 1 is preserved here — confirm which is intended.
        // The match is used as an expression, so no mutable var is needed.
        val companyId =
          if (StringUtils.isBlank(rawCompanyId)) {
            val trimStr = StringUtils.trim(sessionId)
            StringUtils.substring(trimStr, 1, 2) match {
              case "1" => "cmp001"
              case "2" => "cmp002"
              case "3" => "cmp003"
              case _   => "---"
            }
          } else {
            rawCompanyId
          }

        // Format the epoch-millisecond timestamp as a human-readable string.
        val datetimeStr = DateFormatUtils.format(datetime, "yyyy-MM-dd HH:mm:ss")
        (companyId, sessionId, datetimeStr)
      }
      .toDF("companyId", "sessionId", "datetime")

    mapDF.printSchema()

    mapDF.show()
  }

  /**
   * Demonstrates column access styles when filtering a DataFrame read from JSON.
   *
   * @param spark the active SparkSession used to read the input file
   */
  private def columnDemo(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Manual parsing: read raw lines and extract fields with fastjson.
    val dataDS: Dataset[String] = spark.read.textFile("data/spark/sql/msgLog.log")

    // NOTE(review): this transformation is lazy and its result is discarded,
    // so it never executes; kept only to illustrate manual JSON parsing.
    dataDS.map { line =>
      val jSONObject = JSON.parseObject(line)
      val companyId = jSONObject.getString("companyId")
      val action: Int = jSONObject.getIntValue("action")

      (companyId, action)
    }

    // Automatic parsing: let Spark infer the schema from the JSON file.
    val dataDF: DataFrame = spark.read.json("data/spark/sql/msgLog.log")
    dataDF.printSchema()

    // Keep only rows with action = 5 (5 marks a chat message).
    // First style: row-based filter working on Row objects.
    println("row.........")
    dataDF.filter(row => {
      // Spark's JSON reader infers numeric columns as LongType, so the previous
      // getAs[String] cast would throw ClassCastException at runtime. Reading the
      // value as Any and comparing string forms works whether the inferred type
      // is numeric or string, and treats null as "not 5".
      val action = row.getAs[Any]("action")
      String.valueOf(action) == "5"
    }).show(10, false)

    //dataDF.filter($"action")
  }
}
