package org.example

import com.sun.prism.PixelFormat.DataType
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * Traffic-data exploration job.
 *
 * Reads a comma-separated file with fields
 *   jcID,jkID,carID,time,speed,luID,quID
 * and prints: a sample of raw lines, records with speed > 90,
 * record counts per quID (descending), and car counts per province
 * (prefix of carID before the first "-").
 *
 * Usage: an optional first CLI argument overrides the input path;
 * the default keeps the original hard-coded location.
 */
object data1_traffic {
  def main(args: Array[String]): Unit = {
    // Input path was previously hard-coded in three places; hoist it and
    // allow overriding from the command line (backward-compatible default).
    val inputPath = args.headOption.getOrElse("src/main/resources/traffic-data.txt")

    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Raw text lines for the RDD-based analyses below.
    val rdd1 = sc.textFile(inputPath)
    rdd1.take(5).foreach(println)
    println(rdd1.count())

    // Schema naming the 7 comma-separated fields (all kept as strings).
    val schema = StructType(Seq(
      StructField("jcID", DataTypes.StringType),
      StructField("jkID", DataTypes.StringType),
      StructField("carID", DataTypes.StringType),
      StructField("time", DataTypes.StringType),
      StructField("speed", DataTypes.StringType),
      StructField("luID", DataTypes.StringType),
      StructField("quID", DataTypes.StringType)
    ))
    // FIX: the schema was previously defined but never used; apply it via a
    // CSV read so the DataFrame gets properly named columns.
    val typedDF: DataFrame = spark.read.schema(schema).csv(inputPath)
    typedDF.printSchema()
    typedDF.show(3)

    // Untyped text reads kept for comparison: df has a single "value" column,
    // ds is a Dataset[String] of raw lines.
    val df: DataFrame = spark.read.text(inputPath)
    val ds: Dataset[String] = spark.read.textFile(inputPath)
    df.printSchema()
    df.show(3)
    ds.printSchema()
    ds.show(2)

    // Records travelling faster than 90 (speed is field index 4).
    // NOTE(review): assumes every line has >= 5 fields and a numeric speed;
    // a malformed line would throw NumberFormatException here — confirm input.
    val res1 = rdd1.filter { line =>
      val fields = line.split(",")
      fields(4).toDouble > 90
    }
    res1.take(3).foreach(println)
    println(res1.count())

    // Record count per quID (field index 6), most frequent first.
    // (Renamed the local from the inconsistent "qyID" to match the column.)
    val res2 = rdd1.map { line =>
      val fields = line.split(",")
      (fields(6), 1)
    }.reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
    res2.take(5).foreach(println)

    // Car count per province: province is the carID prefix before "-".
    val res3 = rdd1.map { line =>
      val fields = line.split(",")
      val province = fields(2).split("-")(0)
      (province, 1)
    }.reduceByKey(_ + _)
    // FIX: RDD.foreach runs on the executors, so with a non-local master the
    // println output never reaches the driver console; collect to the driver
    // first (the per-province result is small, so this is safe).
    res3.collect().foreach(println)

    sc.stop()
  }
}
