package com.xiaoxu.spark.tuning

import com.xiaoxu.spark.DateFrameWithDataset.DataFrameCase.logger
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SaveMode, SparkSession}

import scala.collection.mutable.ListBuffer

/**
  *ORC: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+ORC
  */

object fileTypeDemo {

  /**
    * Generates a large synthetic [[Student]] dataset and writes it to disk in
    * Avro format, for comparing on-disk file formats (see the ORC link above).
    *
    * NOTE: the "avro" format is not bundled with Spark; it requires the
    * external spark-avro package (org.apache.spark:spark-avro) on the
    * classpath, e.g. via `--packages`.
    */
  def main(args: Array[String]): Unit = {
    //val conf = new SparkConf()
    //conf.registerKryoClasses(Array(classOf[Student],classOf[People]))

    val spark = SparkSession.builder()
      .appName("DataFrameRDDApp")
      .master("local[6]")
      //.config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      //.config(conf)
      //.config("spark.scheduler.mode", "FAIR")
      .getOrCreate()

    // Build 1,000,000 identical rows. `Seq.fill` replaces the original
    // mutable ListBuffer + imperative for-loop.
    // NOTE(review): every row has id = 1 — the original loop ignored its
    // index. If unique ids were intended, use
    // Seq.tabulate(1000000)(i => Student(i + 1, ...)) instead; confirm intent.
    val students = Seq.fill(1000000)(
      Student(1, "fddfssdf", "会计师的防空洞", "是的范德萨范德萨")
    )

    // `val` — the RDD reference is never reassigned.
    val rdd = spark.sparkContext.parallelize(students)
    // Note: the implicit conversions must be imported so that .toDF()
    // is available on an RDD of case classes.
    import spark.implicits._
    val studentDF = rdd.toDF()

    // Overwrite any output left over from a previous run.
    studentDF
      .write
      .mode(SaveMode.Overwrite)
      .format("avro")
      .save("data/output/avro")

    spark.stop()
  }

  /** Row schema for the generated dataset. */
  case class Student(id: Int, name: String, phone: String, email: String)

}
