package df

import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.avg
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}


/**
 * Demonstrates Spark SQL across the untyped (DataFrame) and typed (Dataset)
 * APIs: SELECT, WHERE, GROUP BY, ORDER BY and JOIN, each expressed both as
 * raw SQL against temp views and via the column DSL.
 *
 * NOTE: every demo method calls `spark.stop()` when it finishes, so only ONE
 * of the demo methods can be run per JVM session.
 */
object E3_sql {
  // Spark configuration: run locally using all available cores.
  val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Hello")
  // Entry point to Spark SQL; shared (eagerly, on first object access) by all demos below.
  val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

  // Brings in the toDF/toDS conversions and the $"col" column syntax.
  import spark.implicits._

  def main(args: Array[String]): Unit = {
    // Only one demo per run — each demo stops the SparkSession when done.
    df_sql
  }

  /** Runs the SQL-string variants of the queries against an untyped DataFrame. */
  def df_sql: Unit = {
    // Build the source DataFrame from in-memory rows.
    val data = Seq(
      (1, "老A", 30, 1),
      (2, "大B", 25, 0),
      (3, "小C", 15, 0)
    )
    val df: DataFrame = data.toDF("id", "name", "age", "sex")

    // Register the DataFrame as a temporary view so SQL can reference it by name.
    df.createOrReplaceTempView("people")

    // SELECT: project only the name column.
    val result1: DataFrame = spark.sql("SELECT name FROM people")
    result1.show()

    // WHERE: keep rows with age <= 25.
    val result2: DataFrame = spark.sql("SELECT * FROM people WHERE age <= 25")
    result2.show()

    // GROUP BY: average age per sex group.
    val result3: DataFrame = spark.sql("SELECT sex, AVG(age) as avg_age FROM people GROUP BY sex")
    result3.show()

    // ORDER BY: sort ascending by age.
    val result4: DataFrame = spark.sql("SELECT name, age FROM people ORDER BY age")
    result4.show()

    // A second DataFrame to join against.
    val addressData = Seq(
      (1, "苏州"),
      (2, "无锡"),
      (3, "广州")
    )
    val addressDF: DataFrame = addressData.toDF("id", "city")

    // Register the second DataFrame as a temporary view.
    addressDF.createOrReplaceTempView("addresses")

    // JOIN: pair each person with their city on id.
    val result5: DataFrame =
      spark.sql("SELECT p.name, p.age, a.city FROM people p JOIN addresses a ON p.id = a.id")
    result5.show()

    // Shut down Spark.
    spark.stop()
  }

  /** Runs the SQL-string variants, mapping results back into typed Datasets. */
  def ds_sql: Unit = {

    // Build the source Dataset from in-memory case-class instances.
    val data = Seq(
      Person(1, "老A", 30, 1),
      Person(2, "大B", 25, 0),
      Person(3, "小C", 15, 0)
    )
    val ds: Dataset[Person] = data.toDS()

    // Register the Dataset as a temporary view so SQL can reference it by name.
    ds.createOrReplaceTempView("people")

    // SELECT: project the name column, typed as Dataset[String].
    val result1: Dataset[String] = spark.sql("SELECT name FROM people").as[String]
    result1.show()

    // WHERE: keep rows with age <= 25, typed back to Person.
    val result2: Dataset[Person] = spark.sql("SELECT * FROM people WHERE age <= 25").as[Person]
    result2.show()

    // GROUP BY: average age per sex group, typed as (sex, avg_age) tuples.
    val result3: Dataset[(Int, Double)] = spark
      .sql("SELECT sex, AVG(age) as avg_age FROM people GROUP BY sex")
      .as[(Int, Double)]
    result3.show()

    // ORDER BY: sort ascending by age, typed back to Person.
    val result4: Dataset[Person] = spark.sql("SELECT * FROM people ORDER BY age").as[Person]
    result4.show()

    // A second Dataset to join against.
    val addressData = Seq(
      Address(1, "苏州"),
      Address(2, "无锡"),
      Address(3, "广州")
    )
    val addressDS: Dataset[Address] = addressData.toDS()

    // Register the second Dataset as a temporary view.
    addressDS.createOrReplaceTempView("addresses")

    // JOIN: pair each person with their city on id (result is an untyped DataFrame).
    val joinedResult: DataFrame =
      spark.sql("SELECT p.name, p.age, a.city FROM people p JOIN addresses a ON p.id = a.id")
    joinedResult.show()

    // Shut down Spark.
    spark.stop()
  }

  /** A person's home city, keyed by the same id as [[Person]]. */
  final case class Address(id: Int, city: String)

  /** A demo person record; sex is encoded as an Int (1 or 0 in the sample data). */
  final case class Person(id: Int, name: String, age: Int, sex: Int)

  /** Runs the same queries as [[df_sql]] using the DataFrame column DSL instead of SQL strings. */
  def df_dsl: Unit = {
    // Build the source DataFrame from in-memory rows.
    val data = Seq(
      (1, "老A", 30, 1),
      (2, "大B", 25, 0),
      (3, "小C", 15, 0)
    )
    val df: DataFrame = data.toDF("id", "name", "age", "sex")

    // select(): project only the name column.
    val result1: DataFrame = df.select($"name")
    result1.show()

    // filter(): keep rows with age <= 25.
    val result2: DataFrame = df.filter($"age" <= 25)
    result2.show()

    // groupBy()/agg(): average age per sex group.
    val result3: DataFrame = df.groupBy($"sex").agg(avg($"age").alias("avg_age"))
    result3.show()

    // orderBy(): sort ascending by age.
    val result4: DataFrame = df.orderBy($"age")
    result4.show()

    // A second DataFrame to join against.
    val addressData = Seq(
      (1, "苏州"),
      (2, "无锡"),
      (3, "广州")
    )
    val addressDF: DataFrame = addressData.toDF("id", "city")

    // join(): pair each person with their city on id, then project the wanted columns.
    val result5: DataFrame = df.join(addressDF, df("id") === addressDF("id"))
      .select(df("name"), df("age"), addressDF("city"))
    result5.show()

    // Shut down Spark.
    spark.stop()
  }

  /** Runs the same queries as [[ds_sql]] using the typed Dataset DSL instead of SQL strings. */
  def ds_dsl: Unit = {
    // Build the source Dataset from in-memory case-class instances.
    val data = Seq(
      Person(1, "老A", 30, 1),
      Person(2, "大B", 25, 0),
      Person(3, "小C", 15, 0)
    )
    val ds: Dataset[Person] = data.toDS()

    // select(): project the name column, typed as Dataset[String].
    val result1: Dataset[String] = ds.select($"name").as[String]
    result1.show()

    // filter(): keep rows with age <= 25 (stays a Dataset[Person]).
    val result2: Dataset[Person] = ds.filter($"age" <= 25)
    result2.show()

    // groupBy()/agg(): average age per sex group, typed as (sex, avg_age) tuples.
    val result3: Dataset[(Int, Double)] = ds.groupBy($"sex").agg(avg($"age").as("avg_age"))
      .as[(Int, Double)]
    result3.show()

    // orderBy(): sort ascending by age (stays a Dataset[Person]).
    val result4: Dataset[Person] = ds.orderBy($"age")
    result4.show()

    // A second Dataset to join against.
    val addressData = Seq(
      Address(1, "苏州"),
      Address(2, "无锡"),
      Address(3, "广州")
    )
    val addressDS: Dataset[Address] = addressData.toDS()

    // joinWith(): typed join keeping both sides, then map pairs into a flat tuple Dataset.
    val result5: Dataset[(String, Int, String)] = ds
      .joinWith(addressDS, ds("id") === addressDS("id"))
      .map { case (person, address) => (person.name, person.age, address.city) }
    result5.show()

    // Shut down Spark.
    spark.stop()
  }
}
