package com.example.spark.sql

import com.example.entity.Person
import com.example.util.{SparkUtil, YamlUtil}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql._

import java.util

/**
 * Spark SQL examples: RDD / DataFrame / Dataset conversions, explicit schema
 * construction with column comments, SQL-vs-DSL queries, a DataFrame word
 * count, and multi-format (json/csv/parquet/orc/jdbc) read & write.
 *
 * TODO: Hive connection over JDBC; complex UDF and UDAF examples.
 *
 * @author leali
 * @since 2022/5/15
 */
object OperateDataFrame {
  // One shared session/context for every example method. Note: each example
  // closes the session when it finishes, so only one example can be run per
  // process.
  val sparkSession: SparkSession = SparkUtil.initSpark(appName = "OperateDataFrame")
  val sparkContext: SparkContext = sparkSession.sparkContext

  /**
   * RDD -> DataFrame conversion via the implicit `toDF` encoder on an RDD of
   * the `Person` case class, then prints schema and rows.
   */
  def TransitionFrame(): Unit = {
    // RDD -> DataFrame
    val personRDD: RDD[Person] = GetTestRDD.map((line: String) => {
      val person: Array[String] = line.split(" ")
      Person(person(0).toInt, person(1), person(2).toInt)
    })
    import sparkSession.implicits._
    DealDataFrame(personRDD.toDF())
    // release the shared session
    sparkSession.close()
  }

  /** Reads the sample person file; each line is expected to be "id name age". */
  private def GetTestRDD: RDD[String] = {
    sparkContext.textFile("src/data/input/person.txt")
  }

  /** Prints the schema and the rows of the given DataFrame. */
  private def DealDataFrame(frame: DataFrame): Unit = {
    frame.printSchema()
    frame.show()
  }

  /**
   * RDD[Row] -> DataFrame with an explicit [[StructType]] schema, then
   * attaches a comment to every column and prints the per-column metadata
   * before and after the comments are applied.
   */
  def TransitionFrameBySchema(): Unit = {
    val personRDD: RDD[Row] = GetTestRDD.map((line: String) => {
      val arr: Array[String] = line.split(" ")
      Row(arr(0).toInt, arr(1), arr(2).toInt)
    })
    val schema: StructType = StructType(List(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = false),
      StructField("age", IntegerType, nullable = false)
    ))
    val frame: DataFrame = sparkSession.createDataFrame(personRDD, schema)
    println("输出原始信息")
    frame.schema.foreach((s: StructField) => println(s.name, s.metadata))

    // Attach a comment to each column via StructField.withComment (stored in
    // the field's metadata under the "comment" key).
    val commentMap: Map[String, String] = Map("id" -> "唯一标识", "name" -> "姓名", "age" -> "年龄")
    val newSchema: Seq[StructField] = frame.schema.map((s: StructField) => {
      println(commentMap(s.name))
      s.withComment(commentMap(s.name))
    })
    // BUG FIX: the commented DataFrame was previously discarded and the
    // original (comment-free) schema was printed again under the
    // "processed" heading. Keep it and print the processed metadata.
    val commentedFrame: DataFrame =
      sparkSession.createDataFrame(frame.rdd, StructType(newSchema)).repartition(10)
    println("输出处理后的信息")
    commentedFrame.schema.foreach((s: StructField) => println(s.name, s.metadata))
    sparkSession.close()
  }

  /**
   * Round-trips between the three abstractions — RDD, DataFrame (untyped) and
   * Dataset (typed) — and prints the result of each conversion.
   */
  def TransitionRDD2DataFrame2DataSet(): Unit = {
    val personRDD: RDD[Person] = GetTestRDD.map((line: String) => {
      val arr: Array[String] = line.split(" ")
      Person(arr(0).toInt, arr(1), arr(2).toInt)
    })

    // conversion 1: RDD -> DF
    import sparkSession.implicits._
    val personDF: DataFrame = personRDD.toDF()
    // conversion 2: RDD -> DS
    val personDS: Dataset[Person] = personRDD.toDS()
    // conversion 3: DF -> RDD; a DataFrame is untyped, so elements come back as Row
    val rdd: RDD[Row] = personDF.rdd
    // conversion 4: DS -> RDD; a Dataset keeps its element type
    val rdd1: RDD[Person] = personDS.rdd
    // conversion 5: DF -> DS
    val ds: Dataset[Person] = personDF.as[Person]
    // conversion 6: DS -> DF
    val df: DataFrame = personDS.toDF()

    println("DataFrame===>")
    personDF.printSchema()
    personDF.show()
    println("DataSet===>")
    personDS.printSchema()
    personDS.show()
    println("RDD===>")
    rdd.foreach(println)
    rdd1.foreach(println)

    ds.show()
    df.show()

    sparkSession.close()
  }

  /**
   * The same seven queries expressed twice: first as SQL against a temp view,
   * then in the typed DSL. `$"col"` and `'col` both lift a column name to a
   * [[org.apache.spark.sql.Column]].
   */
  def simpleQuery(): Unit = {

    val value: RDD[Person] = GetTestRDD.map((line: String) => {
      val arr: Array[String] = line.split(" ")
      Person(arr(0).toInt, arr(1), arr(2).toInt)
    })

    import sparkSession.implicits._
    val personDF: DataFrame = value.toDF()
    personDF.createOrReplaceTempView("t_person")
    // =1. select the name column
    sparkSession.sql("select name from t_person").show()
    // =2. select name and age
    sparkSession.sql("select name,age from t_person").show()
    // =3. select name and age, plus age+1
    sparkSession.sql("select name,age,age+1 from t_person").show()
    // =4. filter rows with age >= 25
    sparkSession.sql("select name,age from t_person where age >= 25").show()
    // =5. count people older than 30
    sparkSession.sql("select count(*) from t_person where age > 30").show()
    // =6. group by age and count each group
    sparkSession.sql("select age,count(*) from t_person group by age").show()
    // =7. look up a single person by name
    sparkSession.sql("select name from t_person where name = 'zhangsan'").show()
    //=========== DSL: object-oriented SQL ==============
    // =1. select the name column
    //personDF.select(personDF.col("name"))
    personDF.select("name").show()
    // =2. select name and age
    personDF.select("name", "age").show()
    // =3. select name and age, plus age+1
    //personDF.select("name","age","age+1").show() // fails: cannot resolve '`age+1`' given input columns: [age, id, name]
    // $ turns a string into a Column object
    personDF.select($"name", $"age", $"age" + 1).show()
    // ' (symbol syntax) turns a column name into a Column object
    personDF.select('name, 'age, 'age + 1).show()
    // =4. filter rows with age >= 25
    personDF.filter("age >= 25").show()
    personDF.filter($"age" >= 25).show()
    personDF.filter('age >= 25).show()
    // =5. count people older than 30
    val count: Long = personDF.where('age > 30).count() // where delegates to filter
    println("年龄大于30的人数为:" + count)
    // =6. group by age and count each group
    personDF.groupBy('age).count().show()
    // =7. look up a single person by name
    personDF.filter("name = 'zhangsan'").show()
    personDF.filter($"name" === "zhangsan").show()
    personDF.filter('name === "zhangsan").show()
    personDF.filter('name =!= "zhangsan").show()

    sparkSession.close()
  }

  /**
   * Word count over a text file, solved both with SQL on a temp view and with
   * the DataFrame DSL; shows the top 10 words by frequency.
   */
  def WordCountByDataFrame(): Unit = {

    import sparkSession.implicits._
    // TODO 1. load the data
    val df: DataFrame = sparkSession.read.text("src/data/input/word.txt")
    val ds: Dataset[String] = sparkSession.read.textFile("src/data/input/word.txt")
    df.printSchema()
    df.show()
    ds.printSchema()
    ds.show()

    // tokenize
    //df.flatMap(_.split(" ")) // a DataFrame has no element type, so split is unavailable
    val words: Dataset[String] = ds.flatMap((_: String).split(" "))
    words.printSchema()
    words.show()

    // ===SQL===
    words.createOrReplaceTempView("t_words")
    val sql: String =
      """
        |select value,count(*) as counts
        |from t_words
        |group by value
        |order by counts desc
        |limit 10
        |""".stripMargin
    sparkSession.sql(sql).show()

    // ===DSL===
    words.groupBy('value)
      .count()
      .orderBy('count.desc)
      .show(10)

    sparkSession.stop()
  }

  /**
   * Reads JSON with an explicit schema, then writes the same data out in
   * json, csv, parquet, orc and finally to a MySQL table over JDBC (connection
   * details come from the "LOCAL_MYSQL" entry of the YAML environment config).
   */
  def MultiDataSource(): Unit = {

    // under the hood: format("json").load(paths : _*)
    /**
     * Since 2.3 an explicit schema is required here.
     */
    val schema: StructType = StructType(List(
      StructField("id", IntegerType, nullable = false),
      StructField("first_name", StringType, nullable = false),
      StructField("last_name", StringType, nullable = false),
      StructField("email", StringType, nullable = false),
      StructField("age", IntegerType, nullable = false),
      StructField("gender", StringType, nullable = false),
      StructField("ip_address", StringType, nullable = false)
    ))
    val df: DataFrame = sparkSession.read
      .schema(schema)
      .json("src/data/input/test.json")
    //val df: DataFrame = spark.read.csv("data/input/csv") // under the hood: format("csv").load(paths : _*)
    df.printSchema()
    df.show()

    // coalesce(1) so each format produces a single output file
    df.coalesce(1).write.mode(SaveMode.Overwrite).json("src/data/output/json")
    df.coalesce(1).write.mode(SaveMode.Overwrite).csv("src/data/output/csv")
    df.coalesce(1).write.mode(SaveMode.Overwrite).parquet("src/data/output/parquet")
    df.coalesce(1).write.mode(SaveMode.Overwrite).orc("src/data/output/orc")


    val envInfo: util.LinkedHashMap[String, String] = YamlUtil.getEnvInfo("LOCAL_MYSQL")
    df.coalesce(1).
      write.mode(SaveMode.Overwrite)
      .format("jdbc")
      .option("url", envInfo.get("url"))
      .option("user", envInfo.get("user"))
      .option("password", envInfo.get("password"))
      .option("dbtable", "t_person")
      .save()

    sparkSession.stop()
  }
}
