package com.haozhen.sql

/**
  * @author haozhen
  * @email haozh@ync1.com
  * @date 2021/1/31  13:18
  */
object FileDemo {

  /**
    * Reads a semicolon-delimited CSV file with an explicit schema, prints its
    * contents and the query-execution plan, then shuts the session down.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.sql.{DataFrame, SparkSession}

    // .init drops the trailing '$' from the Scala object's class name.
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName(this.getClass.getCanonicalName.init)
      .getOrCreate()

    // Explicit schema in DDL string form; takes precedence over inference.
    val schema = "name string,age int,job string"

    val frame: DataFrame = spark.read
      .options(Map(
        // Treat the first row as column names.
        "header" -> "true",
        // Field separator inside each record.
        "delimiter" -> ";",
        // FIX: the key was misspelled "inferschame" and was silently ignored.
        // The correct key is "inferSchema"; it is moot here anyway because an
        // explicit schema is supplied below, which overrides inference.
        "inferSchema" -> "true"))
      .schema(schema)
      .csv("file:///E:/javaCode/sparksql/data/people2.csv")

    frame.show()
    println(frame.queryExecution)

    // stop() is the conventional shutdown call (close() simply delegates to it).
    spark.stop()
  }

}
