package wandalake.struckstreaming

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import wanda.commd.DataGens

object ReadFile {

  // Mutable holder for a project data generator; never assigned in this file.
  // NOTE(review): presumably set by other code in the project — confirm before removing.
  var dataGen: DataGens = _

  /**
   * Reads a Delta table from a fixed local path, prints it, then casts the
   * `value` column to string and re-parses each row as a JSON document
   * (the typical shape of a Kafka payload persisted to Delta).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Windows-only workaround: point Hadoop at a local winutils installation
    // so the Hadoop filesystem APIs work without a full Hadoop install.
    System.setProperty("hadoop.home.dir", "E:\\hadoop-common-2.2.0-bin-master")

    val spark = SparkSession
      .builder
      .master("local[2]")
      .appName("StructuredNetworkWordCount")
      .getOrCreate()
    spark.sparkContext.setLogLevel("warn")

    // Fix: the session was previously never stopped, leaking the local
    // SparkContext and its threads; ensure shutdown even if show()/json() throw.
    try {
      val readFile = spark.read.format("delta")
        //    .option("replaceWhere", "date = '2019-01-01'")
        .load("E:\\tmp\\delta\\mysql03\\credit\\wf_task_ext_0")
      readFile.show()

      import spark.implicits._
      // Cast the binary/raw `value` column to string so each row becomes one
      // JSON text line, then let Spark infer the schema from those strings.
      val jsonLines = readFile.selectExpr("cast(value as string)").as[String]
      val dataFrame = spark.read.json(jsonLines)
      dataFrame.show()
    } finally {
      spark.stop()
    }
  }
}
