package org.example

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DataTypes, IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

object data1_traffic {
  /**
   * Spark exercise: read a comma-separated traffic data file and
   *   1. filter records with speed > 90,
   *   2. compute per-district traffic flow in descending order,
   *   3. compute per-province traffic flow and write it to MySQL via JDBC.
   *
   * Expected line layout: jcid,jkId,carId,time,speed,luId,quId
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    val sc = spark.sparkContext
    val inputPath = "src/main/resources/traffic-data.txt"

    // Raw text lines for the RDD-based exercises below.
    val rdd1: RDD[String] = sc.textFile(inputPath)
    rdd1.take(5).foreach(println)
    println(rdd1.count())

    // Explicit schema ("table header") for the CSV-like file.
    // All columns are kept as strings; numeric fields are parsed on demand.
    val schema = StructType(Seq(
      StructField("jcid", DataTypes.StringType),
      StructField("jkId", DataTypes.StringType),
      StructField("carId", DataTypes.StringType),
      StructField("time", DataTypes.StringType),
      StructField("speed", DataTypes.StringType),
      StructField("luId", DataTypes.StringType),
      StructField("quId", DataTypes.StringType)
    ))

    // FIX: the schema was previously defined but never applied —
    // spark.read.text yields a single "value" column. Reading the file
    // as CSV with the explicit schema gives the intended named columns.
    val df: DataFrame = spark.read.schema(schema).csv(inputPath)
    // Dataset[String] view of the same file (one element per line).
    val ds: Dataset[String] = spark.read.textFile(inputPath)
    df.printSchema()
    df.show(3)
    ds.printSchema()
    ds.show(2)

    // 1. Select every record whose speed (column index 4) exceeds 90.
    //    Guard against short/malformed lines and non-numeric speeds so a
    //    single bad row cannot crash the job.
    val res1 = rdd1.filter { line =>
      val fields = line.split(",")
      fields.length > 4 &&
        scala.util.Try(fields(4).toDouble).toOption.exists(_ > 90)
    }
    res1.take(5).foreach(println)
    println(res1.count())

    // 2. Traffic flow per district (column index 6), sorted descending.
    val res2 = rdd1.map { line =>
      val fields = line.split(",")
      (fields(6), 1)
    }.reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
    // res2.take(5).foreach(println)

    // 3. Traffic flow per province: the province is the prefix of the
    //    car ID (column index 2) before the first '-'.
    val res3 = rdd1.map { line =>
      val fields = line.split(",")
      (fields(2).split("-")(0), 1)
    }.reduceByKey(_ + _)
    // FIX: RDD.foreach(println) runs on the executors, so output is not
    // visible on the driver in a real cluster; collect to the driver first.
    res3.collect().foreach(println)

    // Exercise: persist the (province, carFlow) results into MySQL.
    // Replace the placeholders with real connection details before running.
    val jdbcHostname = "your_jdbc_hostname"
    val jdbcPort = "your_jdbc_port"
    val jdbcDatabase = "your_database"
    val jdbcUrl = s"jdbc:mysql://${jdbcHostname}:${jdbcPort}/${jdbcDatabase}"

    val connectionProperties = new java.util.Properties()
    connectionProperties.put("user", "your_username")
    connectionProperties.put("password", "your_password")

    import spark.implicits._
    res3.toDF("province", "carFlow")
      .write
      .mode("overwrite")
      .jdbc(jdbcUrl, "traffic_flows", connectionProperties)

    // FIX: stop the SparkSession (which also stops the underlying
    // SparkContext) instead of only sc.stop().
    spark.stop()
  }

}
