package com.sugon.ww

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructField, StructType}

import scala.collection.mutable.ArrayBuffer

/**
 * Scratch/test job: reads a water-subscriber sheet from a local Excel file
 * via the `com.crealytics.spark.excel` data source and prints a sample.
 *
 * Run locally with `local[2]`; the input path is hard-coded for dev use.
 */
object GsxxTest {
  def main(args: Array[String]): Unit = {
    // Silence Spark's noisy INFO logging for interactive runs.
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession.builder()
      //      .config("spark.sql.planner.skewJoin", "true")
      //      .config("spark.debug.maxToStringFields", 100)
      //      .config("spark.sql.planner.skewJoin.threshold", "500000")
      .master("local[2]")
      //      .config("spark.locality.wait", 0)
      .appName("sugon_spark_gsxx")
      .getOrCreate()

    // Explicit schema: every column read as nullable String.
    // (Immutable List instead of a mutable ArrayBuffer — the schema is fixed.)
    val schema = StructType(List(
      StructField("subject_info_type", StringType, nullable = true),
      StructField("info_name", StringType, nullable = true),
      StructField("name", StringType, nullable = true),
      StructField("id_num", StringType, nullable = true),
      StructField("id", StringType, nullable = true),
      StructField("address", StringType, nullable = true),
      StructField("tel_phone", StringType, nullable = true),
      StructField("phone", StringType, nullable = true),
      StructField("price_water", StringType, nullable = true),
      StructField("price_sewage_teatment", StringType, nullable = true)))

    try {
      val df = spark.read
        .format("com.crealytics.spark.excel")
  //      .option("dataAddress", "'基础信息-自然人-自然人用水信息'!A4:T50") // Optional, default: "A1"
        .option("header", "true") // Required
        //      .option("treatEmptyValuesAsNulls", "false") // Optional, default: true
        //      .option("inferSchema", "false") // Optional, default: false
        //      .option("addColorColumns", "true") // Optional, default: false
        //      .option("timestampFormat", "MM-dd-yyyy HH:mm:ss") // Optional, default: yyyy-mm-dd hh:mm:ss[.fffffffff]
        //      .option("maxRowsInMemory", 20) // Optional, default None. If set, uses a streaming reader which can help with big files
        //      .option("excerptSize", 10) // Optional, default: 10. If set and if schema inferred, number of rows to infer schema from
        //      .option("workbookPassword", "pass") // Optional, default None. Requires unlimited strength JCE for older JVMs
        .schema(schema)
        .load("/Users/weiwei/dev/log/test.xlsx")

      // Show first 3 rows, truncating each cell to 2 characters.
      df.show(3, 2)
    } finally {
      // Release the local Spark context even if the read fails.
      spark.stop()
    }
  }
}
