package org.example

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object sparkData2_traffic {

  /**
   * Entry point: loads captured traffic records from a local text file and runs
   * three simple RDD analyses, printing each result to stdout.
   *
   * Data layout (per the original author's note): monitor-point id, plate number,
   * capture time, speed, road id, area id — comma-separated.
   *
   * NOTE(review): the field indices used below (split(4) for speed, split(2) for
   * the plate, and split(6) for the area in the commented-out analysis) do NOT
   * match that 6-field layout (which would put speed at 3, plate at 1, area at 5).
   * Confirm the actual column order against traffic-data.txt before relying on
   * these results; indices are left as-is here.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      // Explicit app name; without it Spark falls back to a random UUID.
      .appName("sparkData2_traffic")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext

    val trafficRDD: RDD[String] = sc.textFile("src/main/resources/traffic-data.txt")
    // Quick sanity check of the input: sample a few lines and the total count.
    trafficRDD.take(5).foreach(println)
    println(trafficRDD.count())

    // 1. All records whose speed field exceeds 90.
    //    Assumes the field is always numeric; a malformed line would throw
    //    NumberFormatException (kept as-is to preserve original behavior).
    val res1 = trafficRDD.filter { line =>
      val fields = line.split(",")
      fields(4).toDouble > 90
    }
    res1.foreach(println)

    // 2. Traffic volume per area, sorted descending (kept disabled, as in the
    //    original). Fixed while commented out: the original used `filter`, whose
    //    predicate must return Boolean, but the lambda produced a (key, 1) pair —
    //    `map` is what this aggregation needs.
    //    val res2 = trafficRDD.map { line =>
    //      val fields = line.split(",")
    //      (fields(6), 1)
    //    }.reduceByKey(_ + _)
    //      .sortBy(_._2, ascending = false)
    //    res2.take(5).foreach(println)

    // 3. Traffic volume per license-plate province: the province code is the
    //    prefix of the plate before the first '-'.
    val res3 = trafficRDD.map { line =>
      val fields = line.split(",")
      val plate = fields(2)
      (plate.split("-")(0), 1)
    }.reduceByKey(_ + _)
    res3.foreach(println)

    // Stopping the session also stops the underlying SparkContext; preferred
    // over the original sc.stop(), which left the session itself marked active.
    spark.stop()
  }
}
