package io.sqrtqiezi.spark.dataset

import org.apache.spark.sql.SparkSession

/** One row of the flight summary data: destination, origin, and number of flights between them. */
case class Flight(DEST_COUNTRY_NAME: String, ORIGIN_COUNTRY_NAME: String, count: BigInt)
/** Synthetic metadata joined to flights on `count`; `randomData` is an arbitrary generated value. */
case class FlightMetadata(count: BigInt, randomData: BigInt)

/**
 * Demonstrates typed Dataset operations: reading parquet into a case class,
 * `joinWith` against a generated Dataset, `groupByKey` counting, and a
 * per-group transformation with `flatMapGroups`.
 */
object DatasetPractise {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("dataset sample")
      .getOrCreate()

    import spark.implicits._

    try {
      // Read the 2010 flight summary and bind rows to the typed Flight case class.
      val flightDF = spark.read
        .parquet("data/flight-data/parquet/2010-summary.parquet")
      val flights = flightDF.as[Flight]

      flights.show()
      // NOTE(review): first() throws on an empty Dataset — acceptable for a demo.
      println(flights.first().DEST_COUNTRY_NAME)

      // Build a synthetic metadata Dataset keyed on `count` and join it with flights.
      val flightsMeta = spark.range(500)
        .map(x => (x, scala.util.Random.nextInt()))
        .withColumnRenamed("_1", "count")
        .withColumnRenamed("_2", "randomData")
        .as[FlightMetadata]

      val flights2 = flights.joinWith(flightsMeta, flights.col("count") === flightsMeta.col("count"))
      flights2.show(truncate = false)

      // Count flights per destination country.
      flights.groupByKey(x => x.DEST_COUNTRY_NAME)
        .count()
        .show()

      // Per-group transformation via flatMapGroups: tag each retained flight with its group key.
      // NOTE(review): dropWhile only skips the *leading* run of elements with count < 5
      // within each (unordered) group; if the intent is "keep every flight with
      // count >= 5", `filter(_.count >= 5)` is the correct operation — confirm.
      def grpSum(countryName: String, values: Iterator[Flight]) = {
        values.dropWhile(_.count < 5).map(x => (countryName, x))
      }
      flights.groupByKey(x => x.DEST_COUNTRY_NAME)
        .flatMapGroups(grpSum)
        .show(truncate = false)
    } finally {
      // Fix: stop the SparkSession so resources are released and the JVM exits cleanly.
      spark.stop()
    }
  }
}
