package wandalake.struckstreaming

import com.alibaba.fastjson.parser.Feature
import com.alibaba.fastjson.serializer.SerializerFeature
import com.alibaba.fastjson.{JSON, JSONObject}
import io.delta.tables.DeltaTable
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
import org.apache.hadoop.fs.FileSystem
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession, _}
import org.junit.rules.TemporaryFolder
import wanda.commd.DataGens
import wanda.toloplys.DataSourceTestUtils
import wanda.util.FSUtils

import scala.collection.JavaConversions._
import scala.collection.mutable


/**
 * Proof-of-concept CDC pipeline: consumes Maxwell/Canal-style change-event JSON
 * from a Kafka topic, converts each `insert` event's payload into Avro-backed
 * records via project test helpers, and applies insert/update/delete operations
 * to per-(database, table) Delta Lake paths through a foreachBatch sink.
 *
 * NOTE(review): this object relies on shared mutable state (`types`, `dataBase`,
 * `tableNmae`) being written inside an executor-side `flatMap` closure and read
 * driver-side in `upsertToDelta`. That only works because the app runs in
 * local mode (single JVM); on a real cluster the driver would never see the
 * executor's writes. Confirm before deploying beyond local[*].
 */
object ReadKafka {
  var sparks: SparkSession = _
  var dataGen: DataGens = _                 // project test-data generator; produces Avro records
  var tablePath: String = _
  var fs: FileSystem = _
  var dataBase: String = _                  // last-seen "database" field from a Kafka event
  var tableNmae: String = _                 // NOTE(review): typo for "tableName" — part of the public API now; rename with care
  var types: String = _                     // last-seen event type: "insert" | "update" | "delete"
  var basePath:String = _

  /**
   * One-time setup: builds the local SparkSession with Kryo serialization and
   * the test-data generator. The TemporaryFolder / FS setup below is dead code
   * (commented out) kept from an earlier iteration.
   */
  def initialize() {

    sparks = SparkSession.builder
      .appName("Hoodie Datasource test")
      .master("local[2]")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate

    dataGen = new DataGens()
     val folder = new  TemporaryFolder   // NOTE(review): created but never used — folder.create() is never called
//    basePath = folder.getRoot.getAbsolutePath
    // tablePath = "D:\\data\\tmp5"
   // fs = FSUtils.getFs(basePath, sparks.sparkContext.hadoopConfiguration)
  }

  // Function to upsert `microBatchOutputDF` into Delta table using merge
  //
  // foreachBatch sink: dispatches each micro-batch to Delta as a merge (update),
  // a conditional delete, or an append (insert), based on the shared `types` var.
  //
  // NOTE(review): `types.equals(...)` will NPE if this runs before any event has
  // been parsed (e.g. an empty first batch after a checkpoint restart) — the
  // !batchDF.isEmpty guard does not guarantee `types` was set in THIS JVM.
  def upsertToDelta(batchDF: Dataset[mutable.Map[String, String]], batchId: Long) {
    //    val deltaTable = io.delta.tables.DeltaTable.forPath(sparks,"E:\\tmp\\delta\\mysql03")
    // NOTE(review): foreach returns Unit, so this prints "batchDF:()"; the row
    // contents are printed by the foreach side effect, not by this println.
    println("batchDF:"+batchDF.foreach(
      x =>
        println( x.values)


    ))
    // getOrCreate returns the already-running session; appName/master here are
    // effectively ignored after the first call.
    var spark: SparkSession = SparkSession.builder.appName("Hoodie Spark Streaming APP")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .master("local[1]")
      .getOrCreate


    if (!batchDF.isEmpty) {

      // Column names come from the first row's key set; LinkedHashMap insertion
      // order upstream keeps values aligned with these names. Assumes every row
      // in the batch has the same keys in the same order — TODO confirm.
      val cols: Array[String] = batchDF.take(1).flatMap(_.keys)
      val rddRow: RDD[Row] = batchDF.rdd.filter(_.nonEmpty).map { m: mutable.Map[String, String] =>
        val seq = m.values.toSeq
        Row.fromSeq(seq)
      }

      // All columns are modeled as nullable strings.
      val fields: Array[StructField] = cols.map(fieldName => StructField(fieldName, StringType, nullable = true))
      println(fields)  // NOTE(review): prints the Array's toString (hashcode), not its contents
      val schemad: StructType = StructType(fields)
      println(schemad)
      val sparkSql: DataFrame = spark.createDataFrame(rddRow, schemad)


      if (types.equals("update")) {
        // Merge on primary key `id`: matched rows are updated; unmatched source
        // rows are dropped (insertAll is commented out on purpose).
        val deltaTable = DeltaTable.forPath(spark, "D:\\data\\tmp\\mysql03\\" + dataBase.toString + "\\" + tableNmae.toString)
        deltaTable.as("t")
          .merge(
            sparkSql.as("s"),
            "s.id = t.id")
          .whenMatched().updateAll()
          //   .whenNotMatched().insertAll()
          .execute()


      } else if (types.equals("delete")) {
        val deltaTable = DeltaTable.forPath(spark, "D:\\data\\tmp\\mysql03\\" + dataBase.toString + "\\" + tableNmae.toString)
        import org.apache.spark.sql.functions
        // NOTE(review): hard-coded test predicate (id == "10" AND partition ==
        // "2019-08-19") — the batch's actual rows are ignored for deletes.
        deltaTable.delete(functions.col("id").equalTo(functions.lit("10")).&&(
          functions.col("partition").equalTo(functions.lit("2019-08-19"))

        ))

      }
      else if (types.equals("insert")) {
        println("dataBase："+dataBase.toString +tableNmae.toString)
        // Append with schema evolution enabled, partitioned by the "partition"
        // column (assumed present in every row — TODO confirm).
        sparkSql
          .write
          .format("delta")
          .mode("append")
          .option("mergeSchema", "true")
          .partitionBy("partition")
          .save("D:\\data\\tmp\\mysql03\\" + dataBase.toString + "\\" + tableNmae.toString)

      }

    }


  }


  /**
   * Builds the streaming Dataset: Kafka value bytes -> JSON change event ->
   * (for "insert" events only) Avro-templated records -> ordered
   * key/value maps, one map per generated record.
   *
   * Side effects: each parsed event overwrites the object-level vars `types`,
   * `dataBase`, `tableNmae` that `upsertToDelta` later reads.
   */
  def readKafkaStructured(spark: SparkSession): Dataset[mutable.Map[String, String]] = {
    import spark.implicits._
    // Bootstrap server / topic / rate limit are hard-coded for this PoC.
    val df = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "10.161.16.199:9092")
      .option("subscribe", "tt_03")
      .option("maxOffsetsPerTrigger", "200")
      .load()

    val kafkaDf = df.selectExpr("cast(value as string)").as[(String)]
    kafkaDf.schema  // NOTE(review): no-op — result is discarded

    val dataList = kafkaDf.flatMap {
      value =>
        println(value)
        var listData:List[String] = List()
        //        val json = String.valueOf(value)
        // OrderedField keeps JSON key order so downstream column order is stable.
        val jsonObject = JSON.parseObject(value, Feature.OrderedField)
        types = jsonObject.getOrDefault("type", "null").toString
        dataBase = jsonObject.getOrDefault("database", "null").toString
        tableNmae = jsonObject.getOrDefault("table", "null").toString
        // Only "insert" events are expanded into records; update/delete events
        // yield an empty list here and are handled purely via the shared vars.
        if (jsonObject.containsKey("data") && types.equals("insert")) {


          //        if (jsonObject.containsKey("data") && types.equals("insert")) {
          val insertText = JSON.toJSONString(jsonObject.get("data"), SerializerFeature.WriteMapNullValue, SerializerFeature.WriteNullListAsEmpty)
          val dataInsert: JSONObject = JSON.parseObject(insertText, Feature.OrderedField)

          //get myql filed
          // Build one Avro field declaration per payload key (all typed "double").
          var filedInsert = ""
          for (key <- dataInsert.keySet()) {
            filedInsert += "{\"name\": \"" + key + "\",\"type\": \"double\"}" + ","
          }
          // NOTE(review): substring below throws StringIndexOutOfBoundsException
          // when "data" is an empty object (filedInsert stays "").
          val TRIP_EXAMPLE_SCHEMA = "{\"type\": \"record\"," + "\"name\": \"triprec\"," + "\"fields\": [ " +
            "{\"name\": \"timestamp\",\"type\": \"double\"}," +
            "{\"name\": \"primaryKey\", \"type\": \"string\"}," + filedInsert.substring(0, filedInsert.length() - 1) + "]}"
          val avroSchema = new Schema.Parser().parse(TRIP_EXAMPLE_SCHEMA)
          println("avroSchema:" + avroSchema)
          val rec = new GenericData.Record(avroSchema)
          // NOTE(review): assumes every insert payload contains an "id" key —
          // NPE otherwise. TODO confirm against the upstream CDC format.
          val key: String = dataInsert.get("id").toString
          rec.put("primaryKey", key); //add key
          rec.put("timestamp", "30198787")
          // Copy every payload field into the Avro record as a string (despite
          // the schema declaring "double" — fastjson values are stringified).
          val jsonKey = dataInsert.keySet()
          val iter = jsonKey.iterator
          while (iter.hasNext) {
            val instance = iter.next()
            val value = dataInsert.getOrDefault(instance, "null").toString
            println("key: " + instance + " value:" + value)
            rec.put(instance, value)

          }

          val allRecords = dataGen.generateInserts(key /* ignore */ , 1, rec)

          listData = DataSourceTestUtils.convertToStringList(allRecords).toList
        }

          println(listData + "records1records1records1")

          // Re-parse each generated JSON record into an insertion-ordered map so
          // column order survives into createDataFrame.
          val mapJson: List[mutable.Map[String, String]] = listData//.filter(x=>x.isEmpty)
            .map {
            jsonData =>

              val jsonObj = JSON.parseObject(jsonData, Feature.OrderedField)
              val jsonKey = jsonObj.keySet()
              val iter = jsonKey.iterator()
              //  var mapValue: mutable.HashMap[String, String] = new mutable.HashMap[String, String]()
              val mapValue: mutable.Map[String, String] = scala.collection.mutable.LinkedHashMap[String, String]()
              while (iter.hasNext) {
                val instance = iter.next()
                val value = jsonObj.get(instance).toString  // NPEs on JSON null values — TODO confirm WriteMapNullValue output shape
                mapValue.put(instance, value)
                println("===key====：" + instance + "===value===：" + value)
              }
              mapValue
          }

          mapJson
        }

        // NOTE(review): `return` is redundant here (last expression is the
        // method's value) and non-idiomatic Scala.
        return dataList


  }


  /**
   * Entry point: wires the Kafka source to the Delta foreachBatch sink with a
   * 2-second processing-time trigger and blocks until termination.
   */
  def main(args: Array[String]): Unit = {
    // Windows-only workaround for Hadoop's winutils.exe requirement.
    System.setProperty("hadoop.home.dir", "E:\\hadoop-common-2.2.0-bin-master")
    initialize()
    val spark: SparkSession = SparkSession.builder.appName("Hoodie Spark Streaming APP")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .master("local[1]")
      .getOrCreate

    val dataSql = readKafkaStructured(spark)
    // NOTE(review): format("delta") is ignored when foreachBatch is used — the
    // sink logic lives entirely in upsertToDelta.
    dataSql.writeStream
      .format("delta")
      .trigger(Trigger.ProcessingTime("2 seconds"))
      .foreachBatch(upsertToDelta _)
      .outputMode("update")
      .option("checkpointLocation", "D:\\data\\tmp\\mysql03\\checkpoints")
      .start()
      .awaitTermination()

  }

}
