package com.shujia.spark.opt

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Demo of DataFrame caching: the same source DataFrame feeds three
 * independent aggregations, so it is cached once and reused instead of
 * re-reading and re-parsing the input file for each action.
 *
 * Reads car-tollgate records from `kafka-python/cars.log` and writes
 * traffic-flow counts per city, road, and district as CSV under `data/`.
 */
object Demo2CarCache {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[8]")
      .appName("car")
      .config("spark.sql.shuffle.partitions", 7)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read the raw records (CSV, no header) with an explicit schema.
    val carDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("tollgate_id STRING, license_plate STRING, timestamp STRING, longitude STRING, latitude STRING, speed STRING, city STRING, road STRING, district STRING")
      .load("kafka-python/cars.log")

    // Cache: three separate write actions follow; without the cache each
    // one would trigger a full re-read of the source file.
    carDF.cache()

    /**
     * Counts rows per distinct value of `column` (aliased "flow") and
     * writes the result as CSV to `outputPath`, overwriting prior output.
     */
    def writeFlow(column: String, outputPath: String): Unit =
      carDF
        .groupBy(col(column))
        .agg(count(col(column)) as "flow")
        .write
        .format("csv")
        .option("sep", ",")
        .mode(SaveMode.Overwrite)
        .save(outputPath)

    writeFlow("city", "data/city_flow")         // traffic flow per city
    writeFlow("road", "data/road_flow")         // traffic flow per road
    writeFlow("district", "data/district_flow") // traffic flow per district

    // All actions are done; release the cached blocks.
    carDF.unpersist()

    // Keep the JVM alive so the Spark UI (http://localhost:4040) stays
    // reachable for inspection. Sleep instead of `while (true) {}`, which
    // busy-spins and pegs a CPU core.
    Thread.sleep(Long.MaxValue)
  }
}
