package com.wu.spark

import org.apache.spark.sql.SparkSession

object DfSave {

  /**
   * Demo of Spark SQL DataFrame save/load round-tripping:
   * reads a JSON file into a DataFrame, writes it out as Parquet,
   * reads the Parquet back, registers it as a temp view, and queries it.
   *
   * Paths are hard-coded local Windows paths; adjust before running elsewhere.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("DfSave")
      .master("local[2]")
      .getOrCreate()

    // First way to read JSON (generic loader with an explicit format):
    // val df = spark.read.format("json").load("E://ideaWorkSpace2018.4.23//Spark-sql//src//t.json")

    // Second way to read JSON (format-specific shortcut):
    val df = spark.read.json("E://ideaWorkSpace2018.4.23//Spark-sql//src//t.json")
    df.printSchema()
    df.show()

    // Writing a projection of the query result to a target location:
    // df.select("age", "name").write.format("parquet").save("E://ideaWorkSpace2018.4.23//Spark-sql//Sources//mynew.parquet")

    // Second way: write the whole DataFrame directly to Parquet.
    // mode("overwrite") is required so a re-run does not fail with
    // "path already exists" — the default save mode is ErrorIfExists.
    df.write.mode("overwrite").parquet("E://ideaWorkSpace2018.4.23//Spark-sql//Sources//mynew3.parquet")

    // Read the Parquet back and query it through a temporary view.
    val df3 = spark.read.parquet("E://ideaWorkSpace2018.4.23//Spark-sql//Sources//mynew3.parquet")
    df3.createOrReplaceTempView("student")

    spark.sql("select * from student").show()

    // Querying a file directly with SQL: the path must be wrapped in
    // backticks, not single quotes (parquet.`...`):
    // val df2 = spark.sql("select * from parquet.`E://ideaWorkSpace2018.4.23//Spark-sql//Sources//users.parquet`")

    println("******************************************")
    // df2.show()

    // Same backtick rule applies when querying a JSON file directly:
    // val sqldf = spark.sql("select * from json.`E://ideaWorkSpace2018.4.23//Spark-sql//Sources//newjson.json`")
    // sqldf.show()

    spark.close()
  }

}
