package org.example

import org.apache.arrow.vector.dictionary.StructSubfieldEncoder
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.{SparkSession, types}
import org.apache.spark.sql.types.{DataTypes, IntegerType, StringType, StructField, StructType}

object aaa {
  /**
   * Demo Spark job: reads a CSV score file and a JSON student file with
   * explicit schemas, prints/shows both, and computes the average student
   * age via Spark SQL. Runs against a local master; stops the session on exit.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    // Table structure (column names) and data types for the score CSV.
    // FIX: "name" and "course" are textual columns, so they are StringType
    // (they were previously IntegerType, which would parse to null), and the
    // schema is now actually applied to the CSV read below — before, it was
    // defined but never used.
    val schemaScore = StructType(Seq(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = false),
      StructField("course", StringType, nullable = false),
      StructField("score", IntegerType, nullable = false)
    ))

    val scoreFrame = spark.read.schema(schemaScore).csv("src/main/resources/score.txt")
    // Rename the columns with student-prefixed aliases.
    val scoreNewName = scoreFrame.toDF("stu_id", "stu_name", "stu_course", "stu_score")
    // Print the data schema (column names and types).
    scoreFrame.printSchema()
    // Inspect the data.
    scoreFrame.show()
    scoreNewName.show()

    // Read JSON data with an explicit schema (including a nested struct and
    // an array column) and operate on it with SQL.
    val schema = StructType(Seq(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = true),
      StructField("age", IntegerType, nullable = true),
      StructField("school", DataTypes.createStructType(Array(
        StructField("schoolName", StringType),
        StructField("time", StringType)
      ))),
      StructField("score", DataTypes.createArrayType(IntegerType), nullable = true)
    ))
    val data = spark.read.schema(schema).json("src/main/resources/json.txt")
    data.printSchema()
    data.show()

    // Use SQL to compute the average age of the graduating students.
    data.createOrReplaceTempView("json")
    val averageAgeQuery = spark.sql(
      """
        |select
        |avg(age) as avg_json
        |from json
        |""".stripMargin
    )
    averageAgeQuery.show()

    // spark.stop() also stops the underlying SparkContext, so the previously
    // unused `sc` local is no longer needed.
    spark.stop()
  }
}
