package com.doit.sparksql.day01

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Dataset, Row}

/**
 * @DATE 2022/1/13/11:02
 * @Author MDK
 * @Version 2021.2.2
 *
 *    json 中的正常数据进行解析
 *    脏数据的过滤(filter和where字段过滤的两种方法)
 *
 *    隐式导入  import spark.implicits._
 * */
/**
 * Demo: reading JSON that contains malformed lines.
 *
 * In PERMISSIVE mode (Spark's default) malformed JSON lines are collected
 * into the special `_corrupt_record` column; well-formed rows have `null`
 * there. This demo filters the dirty rows out two ways:
 *   1. DataFrame API  — `filter($"_corrupt_record".isNull)`
 *   2. Spark SQL      — `where _corrupt_record is null`
 */
object SQL_Json02 {
  // Silence Spark's verbose INFO/WARN logging so the demo output is readable.
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {
    val spark = SQLUtil.getSession

    // Load the JSON data; lines that fail to parse land in `_corrupt_record`.
    val df: DataFrame = spark.read.json("sql_data/json/b.json")

    // NOTE: since Spark 2.3, a query on a raw JSON source whose referenced
    // columns include only the internal corrupt-record column throws an
    // AnalysisException. Caching materializes the parsed data first, which
    // is the documented workaround and makes the filters below safe.
    df.cache()

    df.printSchema()
    df.createTempView("tb_json")
    spark.sql("select * from tb_json").show()
    // Expected table layout — `_corrupt_record` holds the dirty rows:
    /*
    * +--------------------+----+------+----+--------+
      |     _corrupt_record| age|gender|  id|    name|
      +--------------------+----+------+----+--------+
      |                null|  23|female|   1|     zss|
      |                null|  25|  male|   2|     lss|
      |                null|  34|female|   3|      ww|
      |                null|  23|  male|   4| zhaoliu|
      |                null|  23|female|   5|chenyuan|
      |{"id": 6", name":...|null|  null|null|    null|
      |{"id": 7, "name":...|null|  null|null|    null|
      +--------------------+----+------+----+--------+
    * */

    import spark.implicits._
    // Approach 1: DataFrame API — keep only well-formed rows.
    val ds: Dataset[Row] = df.filter($"_corrupt_record".isNull)
    ds.rdd.map(row => row.getAs[String]("name")).foreach(println)

    println("---------------------------sql语句打印----------------------------------")
    // Approach 2: Spark SQL. Reuse the view registered above — registering
    // the same DataFrame under a second temp view was redundant.
    spark.sql(
      """
        |select
        |id,name,age,gender
        |from tb_json
        |where
        |_corrupt_record
        |is null
        |""".stripMargin).show(3) // show(3): print only the first 3 rows
    spark.close()
  }
}
