package cn.doitedu.batchjobs

import org.apache.spark.sql.SparkSession

import java.sql.Timestamp

/**
 * Manual smoke test: verifies that `timestamp` values stored in a Hive
 * ORC table can be read back through Spark's untyped Row API.
 *
 * Requires an existing Hive table created with:
 *   create table test.ts(a timestamp) stored as orc;
 *
 * Run locally; each timestamp in the table is printed to stdout.
 */
object TestTimestamp {

  def main(args: Array[String]): Unit = {

    // Local single-threaded session with Hive support so test.ts resolves
    // against the Hive metastore; one shuffle partition keeps output simple.
    val session = SparkSession.builder()
      .appName("烧成灰我都能写")
      .master("local")
      .config("spark.sql.shuffle.partitions", "1")
      .enableHiveSupport()
      .getOrCreate()

    // Pull each row's first (and only) column as java.sql.Timestamp and
    // print it — exercises getTimestamp against ORC-backed storage.
    session.read
      .table("test.ts")
      .rdd
      .map(row => row.getTimestamp(0))
      .foreach(println)

    session.close()
  }

}
