package com.wu.spark

import org.apache.spark.sql.SparkSession

object runParquetSchemaMergingExample {

  // Single definition of the example's base directory; the original repeated
  // this literal in every path, inviting copy/paste drift.
  private val BasePath = "E://ideaWorkSpace2018.4.23//Spark-sql//Sources"

  /**
   * Entry point: builds a local SparkSession and runs the partitioned-text
   * merging example. Uncomment the call below to run the RDD-based variant.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("runParquetSchemaMergingExample")
      .master("local[2]")
      .getOrCreate()

    try {
      // rddParquethebing(spark)
      jsonhebing(spark)
    } finally {
      // FIX: the original never stopped the session, leaving the local Spark
      // context (UI, threads) alive after main returned.
      spark.stop()
    }
  }

  /**
   * Writes two text files as Parquet under partition directories `key=1` and
   * `key=2`, then reads the parent directory back so Spark discovers `key` as
   * a partition column, and displays the merged result both via SQL over a
   * temp view and directly.
   *
   * NOTE(review): despite the name, this reads plain text files, not JSON.
   */
  private def jsonhebing(spark: SparkSession): Unit = {
    val df1 = spark.read.format("text").load(s"$BasePath//people.txt")
    df1.write.parquet(s"$BasePath//student2//key=1")

    val df2 = spark.read.format("text").load(s"$BasePath//nwepeople.txt")
    // FIX (consistency): the original used write.save(), which only works
    // because the default output format happens to be parquet; be explicit,
    // matching the df1 write above.
    df2.write.parquet(s"$BasePath//student2//key=2")

    // FIX: this is a schema-merging example, but the original omitted the
    // mergeSchema option here, so differing per-partition schemas would not
    // be merged. Enable it explicitly, matching rddParquethebing.
    val df3 = spark.read.option("mergeSchema", "true").parquet(s"$BasePath//student2")

    df3.createOrReplaceTempView("test")
    spark.sql("select * from test").show()
    df3.show()
  }

  /**
   * Writes two DataFrames with different column sets ("value"/"square" and
   * "value"/"clbe") into sibling partition directories, then reads the parent
   * back with mergeSchema=true so the result carries the union of both
   * schemas plus the discovered `key` partition column.
   */
  private def rddParquethebing(spark: SparkSession): Unit = {
    import spark.implicits._

    val df1 = spark.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
    df1.write.parquet(s"$BasePath//student//key=3")

    val df2 = spark.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "clbe")
    df2.write.parquet(s"$BasePath//student//key=4")

    // mergeSchema=true asks the Parquet source to reconcile the differing
    // per-partition file schemas into a single merged schema.
    val mergedDF = spark.read.option("mergeSchema", "true").parquet(s"$BasePath//student")

    mergedDF.printSchema()
    mergedDF.show()
  }

}
