import org.apache.hadoop.shaded.org.eclipse.jetty.websocket.common.frames.DataFrame
import org.apache.spark.sql.{Dataset, SparkSession}

/**
 * Demo driver: reads a traffic-data text file three ways — as an
 * RDD[String], as an untyped DataFrame, and as a typed Dataset[String] —
 * and prints small samples of each for inspection.
 */
object data1_traffic {

  def main(args: Array[String]): Unit = {
    // NOTE(review): the file-level `DataFrame` import points at Jetty's
    // shaded websocket frame class, not Spark SQL's. Import the correct
    // type locally so the annotation below resolves to Spark's DataFrame.
    import org.apache.spark.sql.DataFrame

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Read the raw lines as an RDD[String] and inspect the first records.
    val rdd1 = sc.textFile("src/main/resources/traffic-data.txt")
    rdd1.take(5).foreach(println)
    // `count()` is a side-effecting Spark action — keep the parentheses.
    println(rdd1.count())

    // Same file as an untyped DataFrame (single `value` string column)...
    val df: DataFrame = spark.read.text("src/main/resources/traffic-data.txt")
    // ...and as a typed Dataset[String] for comparison (created here only
    // to demonstrate the API; not otherwise used).
    val ds: Dataset[String] = spark.read.textFile("src/main/resources/traffic-data.txt")

    // Was `df.()` — a syntax error; printing the schema is the evident intent.
    df.printSchema()
    df.show(3)

    sc.stop()
  }

}
