package io.sqrtqiezi.spark.dataframe

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/**
 * Demo of basic structured (DataFrame) operations on the 2015 flight-summary
 * JSON dataset: schema inspection, column expressions, renaming, filtering,
 * sampling, repartitioning, and plan explanation.
 *
 * Expects the data file at `data/flight-data/json/2015-summary.json`
 * relative to the working directory.
 */
object StructuredOperator {
  def main(args: Array[String]): Unit = {
    // Local-mode session for a small demo dataset.
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("flight data")
      .getOrCreate()

    try {
      val df = spark.read.format("json")
        .load("data/flight-data/json/2015-summary.json")

      // Inferred schema and a peek at the data.
      println(df.schema)
      println(df.first())
      df.show(2)

      // Create a DataFrame from an in-memory Seq via implicits.
      import spark.implicits._
      val myDF = Seq(("Hello", 2, 1L)).toDF("col1", "col2", "col3")
      myDF.show()

      // Add a literal column alongside all existing columns.
      df.select(expr("*"), lit("hello world").as("message")).show()

      // Derived boolean column comparing origin and destination.
      df.withColumn("withinCountry", expr("origin_country_name == dest_country_name"))
        .show()

      df.withColumnRenamed("dest_country_name", "dest")
        .show()

      // filter and where are synonyms; chaining ANDs the predicates.
      df.filter(col("count") < 2)
        .where(col("dest_country_name") === "United States")
        .show()

      println(df.count())
      // Sample without replacement, ~10% fraction, fixed seed for determinism.
      println(df.sample(false, 0.1, 5).count())

      // collect() pulls all rows to the driver — fine for this small dataset.
      df.collect().foreach(println)

      println(df.rdd.getNumPartitions)
      println(df.repartition(10).rdd.getNumPartitions)

      // show(n, truncate = false) prints full column values.
      df.show(5, false)

      // Column.explain returns Unit and prints the expression tree itself;
      // wrapping it in println would just print "()".
      df.col("count").explain(true)
    } finally {
      // Release the session and its resources even if a stage above fails.
      spark.stop()
    }
  }
}
