package com.fwmagic.spark.other.sql

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object SparkSQLDemo {

  // Case class backing the typed Dataset examples. Datasets are usually
  // defined via a case class — a custom type is a strong ("typed") schema.
  case class Person(name: String, age: Long)

  // Simple key/value record used to build an in-memory DataFrame for the join demo.
  case class Record(key: Int, value: String)

  /**
   * Demo entry point. Exercises Spark SQL with Hive support:
   *  - (commented out) untyped DataFrame and typed Dataset basics,
   *  - creating a Hive database/table, loading local data into it,
   *  - querying the Hive table and mapping rows to a typed Dataset,
   *  - joining the Hive table against an in-memory DataFrame.
   *
   * Side effects: drops/recreates Hive database `test` and table `test.src`,
   * and reads '/home/hadoop/apps/hive/testdata/src.txt' from the local FS.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("SparkSQLDemo")
      //.master("local[*]")
      // Important change in Spark SQL 2.0: spark.sql.warehouse.dir must be set
      // to the Hive warehouse location.
      //.config("spark.sql.warehouse.dir","/Users/fangwei/learn/mycode/workspace/spark-warehouse")
      .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
      // Enable Hive support (metastore access, HiveQL, Hive SerDes).
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._
/*
    // Read JSON into an untyped DataFrame.
    val personDF: DataFrame = spark.read.json("/Users/fangwei/learn/mycode/workspace/fwmagic-spark/src/main/resources/person.json")
    personDF.show()
    personDF.printSchema()
    personDF.select($"name").show() // classic weakly-typed (untyped) operation
    // Scala syntax: columns referenced with the $ prefix
    personDF.select($"name", $"age" + 100).show()
    personDF.select(personDF("name"), personDF("age") + 100).show()

    // filter with a column expression
    personDF.filter($"age" > 60).show()

    // groupBy followed by an aggregation
    personDF.groupBy($"age").count().show()

    // Register a temporary view
    personDF.createOrReplaceTempView("person")

    spark.sql(
      """
        |select * from person
      """.stripMargin).show()


    // Build a Dataset directly from JVM objects.
    val perDS: Dataset[Person] = Seq(Person("露娜", 30), Person("娜可露露", 28)).toDS()
    perDS.show()

    // Build a Dataset from primitive values.
    val numDS: Dataset[Int] = Seq(1, 3, 5).toDS()
    numDS.map(num => num + 1).show()

    // Convert a DataFrame to a typed Dataset.
    val personDS: Dataset[Person] = personDF.as[Person]
    personDS.show()*/

    println("========================")

    // Clean up any previous run. `cascade` makes the database drop robust even
    // if it still contains tables other than src.
    spark.sql("drop table if exists test.src")
    spark.sql("drop database if exists test cascade")

    spark.sql("create database test")
    // Create the Hive table (tab-delimited text).
    spark.sql(
      """
        |create table test.src(key int,value string) ROW FORMAT delimited fields terminated by '\t'
      """.stripMargin)

    spark.sql("load data local inpath '/home/hadoop/apps/hive/testdata/src.txt' into table test.src")

    spark.sql("select * from test.src").show()

    spark.sql("select count(1) from test.src").show()

    // Fix: 'globalsort by' is not a valid Spark SQL clause (valid forms are
    // ORDER BY / SORT BY / CLUSTER BY / DISTRIBUTE BY) and caused a
    // ParseException. ORDER BY gives a total (global) ordering.
    val sqlHiveDF: DataFrame = spark.sql("select key,value from test.src where key < 3 order by value desc")

    // Map each Row into a formatted string; the partial-function literal
    // throws MatchError on any row whose types don't match (key int, value string).
    val sqlHiveDS: Dataset[String] = sqlHiveDF.map {
      case Row(key: Int, value: String) => s"KEY:$key,VALUE:$value"
    }

    sqlHiveDS.show()

    // Build a DataFrame from 100 in-memory records and join it with the Hive table.
    val recordsDF: DataFrame = spark.createDataFrame((1 to 100).map(i => Record(i, s"val_$i")))
    recordsDF.createOrReplaceTempView("records")
    // Use an explicit join condition (ON) rather than a cross join filtered by WHERE.
    spark.sql(
      """
        |select * from test.src join records on src.key = records.key
      """.stripMargin).show()

    spark.stop()
  }

}
