package org.example
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset}

/**
 * Traffic-data exercise: reads a comma-separated traffic capture file as
 * RDD / DataFrame / Dataset and computes three simple aggregations.
 *
 * Columns (comma-separated):
 *   checkpoint ID, monitor ID, plate number, capture time, speed,
 *   road ID, district ID
 */
object data1_traffic {
  def main(args: Array[String]): Unit = {
    // Local-mode session using all available cores.
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Read the traffic data three ways: RDD, DataFrame, Dataset.
    val rdd1 = sc.textFile("src/main/resources/traffic-data.txt")
    rdd1.take(5).foreach(println)
    println(rdd1.count())

    // Explicit schema for the 7 columns described above (all kept as
    // strings; speed is parsed to Double only where needed below).
    val schema = StructType(Seq(
      StructField("jcID", DataTypes.StringType),
      StructField("jkID", DataTypes.StringType),
      StructField("carID", DataTypes.StringType),
      StructField("time", DataTypes.StringType),
      StructField("speed", DataTypes.StringType),
      StructField("luID", DataTypes.StringType),
      StructField("quID", DataTypes.StringType)
    ))
    import spark.implicits._
    val df: DataFrame = spark.read.text("src/main/resources/traffic-data.txt")
    val ds: Dataset[String] = spark.read.textFile("src/main/resources/traffic-data.txt")
    df.printSchema()
    df.show(3)
    ds.printSchema()
    ds.show(2)

    // Read the txt file as a structured table: parse it as CSV with the
    // explicit schema. (Previously `schema` was defined but never used,
    // and the "how to read as a table" question was left open.)
    val dfTable: DataFrame = spark.read
      .schema(schema)
      .csv("src/main/resources/traffic-data.txt")
    dfTable.printSchema()
    dfTable.show(3)

    // 1. Select all records whose speed exceeds the threshold.
    // NOTE(review): the original comment said "over 98" but the code
    // compares against 90; keeping the code's behavior — confirm the
    // intended threshold with the exercise spec.
    val res1 = rdd1.filter { line =>
      val fields = line.split(",")
      fields(4).toDouble > 90
    }
    res1.take(3).foreach(println)
    println(res1.count())

    // 2. Traffic volume per district, sorted in descending order.
    // (The descending sort was required by the spec but missing in the
    // original implementation.)
    val res2 = rdd1.map { line =>
      val districtId = line.split(",")(6)
      (districtId, 1)
    }.reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
    res2.take(5).foreach(println)

    // 3. Traffic volume per license-plate province prefix
    // (plate format assumed "province-..." — TODO confirm with data).
    val res3 = rdd1.map { line =>
      val carId = line.split(",")(2)
      val province = carId.split("-")(0)
      (province, 1)
    }.reduceByKey(_ + _)
    // collect() first so output is printed on the driver; a bare
    // RDD.foreach(println) prints on the executors in cluster mode.
    res3.collect().foreach(println)

    // Exercise: persist results 1 and 2, and (province, carFlow), to MySQL.
    spark.stop()
  }
}
