package com.guchenbo.spark.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DateType, DoubleType, IntegerType, StringType, StructField, StructType}

import java.sql.Struct

object CsvReader {

  /**
   * Demo entry point: reads a semicolon-separated CSV file with an explicit
   * schema, registers it as a temp view, and prints every row partition by
   * partition.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("demo").getOrCreate()

    try {
      // Explicit schema instead of inferSchema so malformed rows are judged
      // against a known contract.
      // NOTE(review): "age" is DoubleType here; an earlier variant used
      // IntegerType — confirm which one the data actually expects.
      val fields = List[StructField](
        StructField("name", StringType),
        StructField("age", DoubleType),
        StructField("job", StringType),
        StructField("birth", DateType)
      )
      val schema = StructType(fields)

      // Parse mode for malformed records. PERMISSIVE keeps bad rows (nulling
      // the unparseable fields); alternatives are DROPMALFORMED and FAILFAST.
      val dataMassCheck = "PERMISSIVE"

      val sr = spark.read
        .option("sep", ";")
        .option("encoding", "UTF-8")
        .option("header", "true")
        .option("escape", "\"")
        .option("mode", dataMassCheck)

      // DataFrameReader is mutable: schema() updates the reader in place and
      // returns it, so the result does not need to be reassigned.
      sr.schema(schema)

      val df = sr.csv(path("people.csv"))
      df.createTempView("tes")
      spark.sql("select * from tes").foreachPartition(s => {
        println("dd")
        s.foreach(println(_))
      })
    } finally {
      // Always release the local Spark context, even if the job fails.
      spark.stop()
    }
  }

  /**
   * Resolves a bare resource file name to its path under this module's
   * resources directory.
   *
   * @param s the file name, e.g. "people.csv"
   * @return the relative path to the resource
   */
  def path(s: String): String = {
    s"spark-sql/src/main/resources/$s"
  }
}
