package wanda.kafka

import java.io.File
import java.time.LocalDateTime
import com.alibaba.fastjson.parser.Feature
import com.alibaba.fastjson.{JSON, JSONObject, TypeReference}
import io.delta.tables.DeltaTable
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql._
import org.apache.spark.sql.delta.DeltaTableUtils
import org.apache.spark.util.CollectionAccumulator
import org.junit.rules.TemporaryFolder
import wanda.commd.DataGens
import wanda.toloplys.DataSourceTestUtils
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.collection.{immutable, mutable}


object KafkaHelper {

  /** Shared logger for this helper object. */
  lazy val log = org.apache.log4j.LogManager.getLogger("KafkaHelper")
  // Mutable singleton state, populated once by initialize() and read by every other
  // method. NOTE(review): not thread-safe and unusable before initialize() runs —
  // confirm callers always initialize first.
  var sparks: SparkSession = _ // shared SparkSession, built with master("local[*]") in initialize()
  var dataGen: DataGens = _ // record generator used by StructuredFromKafka
  var fs: FileSystem = _ // Hadoop filesystem handle, used for table/database renames
  var basePath: String = _ // root path under which Delta tables are stored
  var partition: String = _ // partition value compared against in ranged deletes
  var deleteRang: String = _ // delete-range operator consumed by upsertToDelta: "=", ">" or "<"
  var jssc: JavaSparkContext = _ // Java wrapper around sparks' SparkContext
  /**
   * Initializes the shared Spark session, Hadoop filesystem handle and run configuration.
   *
   * Must be called once before any other method of this object is used.
   *
   * @param locationPath        root path under which Delta tables are stored (becomes `basePath`)
   * @param localPartition      explicit partition value, used when `deletePartitionMode` is not "auto"
   * @param deletePartitionMode "auto" to take the generator's default second partition,
   *                            any other value uses `localPartition`
   * @param rang                delete-range operator ("=", ">" or "<") consumed by upsertToDelta
   */
  def initialize(locationPath: String, localPartition: String, deletePartitionMode: String, rang: String): Unit = {

    sparks = SparkSession.builder.appName("Hoodie Spark Streaming APP")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .master("local[*]") // NOTE(review): hard-coded local master — confirm this helper is test-only
      .getOrCreate

    // Resolve the partition value as an expression instead of a mutate-in-branches pair.
    partition =
      if (deletePartitionMode == "auto") DataGens.DEFAULT_SECOND_PARTITION_PATH
      else localPartition

    deleteRang = rang
    dataGen = new DataGens()
    // The previous code also created a junit TemporaryFolder whose result was never
    // used (basePath was overwritten with locationPath right after); that dead
    // side effect has been removed.
    basePath = locationPath
    jssc = new JavaSparkContext(sparks.sparkContext)
    fs = FileSystem.get(jssc.hadoopConfiguration())
  }

  // Function to upsert `microBatchOutputDF` into Delta table using merge

  /**
   * Ensures the directory at `dirPath` exists, creating it and any missing parents
   * if necessary.
   *
   * NOTE(review): despite the "is...Exist" name this is an ensure/create operation,
   * not a query; the name is kept unchanged for source compatibility with callers.
   *
   * @param dirPath local filesystem directory path
   */
  def isChartPathExist(dirPath: String): Unit = {
    import java.nio.file.{Files, Paths}
    val dir = Paths.get(dirPath)
    // createDirectories creates the full parent chain (like File.mkdirs) and, unlike
    // the previous unchecked mkdirs() call, reports failure via an exception.
    if (!Files.exists(dir)) {
      Files.createDirectories(dir)
    }
  }


  /**
   * Packs a 7-element string array into a Tuple7.
   *
   * Arrays that are null or whose length differs from 7 yield a tuple of seven
   * empty strings, mirroring the original catch-all behaviour.
   *
   * @param list the fields of one record, expected in positional order
   * @return the elements as a Tuple7, or seven empty strings on a size mismatch
   */
  def list2Tuple7(list: Array[String]): (String, String, String, String, String, String, String) = {
    if (list != null && list.length == 7)
      (list(0), list(1), list(2), list(3), list(4), list(5), list(6))
    else
      ("", "", "", "", "", "", "")
  }


  /**
   * Applies one micro-batch of CDC records to Delta tables rooted at `basePath`.
   *
   * Each element of `batchDF` is an insertion-ordered map carrying the keys
   * dataBase / type / tableName / value / timestamp / primaryKey / partition
   * (as produced by StructuredFromKafka). Depending on the "type" field a record
   * becomes a Delta MERGE ("update"), a DELETE ("delete"), an append ("insert"),
   * or a soft drop via directory rename ("table-drop" / "database-drop").
   *
   * @param batchDF micro-batch of parsed records
   * @param batchId streaming batch id (unused; required by the foreachBatch signature)
   */
  def upsertToDelta(batchDF: Dataset[mutable.Map[String, String]], batchId: Long): Unit = {

    batchDF.persist() // reused twice below: foreachPartition and take(1)
    if (!batchDF.isEmpty) {
      // Ship each record's fields back to the driver through an accumulator.
      val accumulator: CollectionAccumulator[Array[String]] =
        sparks.sparkContext.collectionAccumulator[Array[String]]

      batchDF.foreachPartition {
        rows: Iterator[mutable.Map[String, String]] =>
          rows.foreach { jsonMap =>
            // Build the field array directly. The previous join-on-":::" / split(":::")
            // round trip corrupted any record whose value contained ":::" and silently
            // lost trailing empty fields (String.split drops trailing empties).
            accumulator.add(Array(
              jsonMap("dataBase"),
              jsonMap("type"),
              jsonMap("tableName"),
              jsonMap("value"),
              jsonMap("timestamp"),
              jsonMap("primaryKey"),
              jsonMap("partition")))
          }
      }

      // Column names come from the first record's key order (LinkedHashMap built in
      // StructuredFromKafka); that order must line up with the field order above.
      val cols: Array[String] = batchDF.take(1).flatMap(_.keys)

      for (table <- accumulator.value) {

        log.info("processing record: " + table.mkString(" & "))

        val deltaDataBase = table(0)
        val tableForm = table(1)
        val deltaTableName = table(2)
        val tablePath = basePath + deltaDataBase + "/" + deltaTableName

        // Single-row DataFrame holding this record under the original column names.
        val filterSql = sparks.createDataFrame(Seq(list2Tuple7(table)))
          .toDF(cols(0), cols(1), cols(2), cols(3), cols(4), cols(5), cols(6))
        filterSql.show() // show() prints itself; it was previously wrapped in println(), which printed "()"

        tableForm match {
          case "update" =>
            log.info("update:" + tablePath)
            if (DeltaTableUtils.isDeltaTable(sparks, new Path(tablePath))) {
              DeltaTable.forPath(sparks, tablePath)
                .as("events")
                .merge(
                  filterSql.as("updates"),
                  "events.primaryKey = updates.primaryKey")
                .whenMatched().updateAll()
                .execute()
            }

          case "delete" =>
            import org.apache.spark.sql.functions
            // collect() the primary keys on the driver. The previous code mutated a
            // broadcast ListBuffer from inside rdd.foreach, which only appears to work
            // in local mode: broadcast variables are read-only and executor-side
            // mutations never propagate back to the driver on a real cluster.
            val ids: Array[String] = filterSql.selectExpr("primaryKey")
              .rdd.map(_.getAs[String]("primaryKey")).collect()
            log.info("primary keys to delete: " + ids.mkString(","))

            if (DeltaTableUtils.isDeltaTable(sparks, new Path(tablePath))) {
              val deltaTable = DeltaTable.forPath(sparks, tablePath)
              for (id <- ids) {
                val keyMatch = functions.col("primaryKey").equalTo(functions.lit(id))
                deleteRang match {
                  case "=" =>
                    deltaTable.delete(keyMatch)
                  case ">" =>
                    deltaTable.delete(keyMatch.&&(functions.col("partition").gt(functions.lit(partition))))
                  case "<" =>
                    deltaTable.delete(keyMatch.&&(functions.col("partition").lt(functions.lit(partition))))
                  case _ =>
                    log.info("ERORR：匹配删除范围错误")
                }
              }
            }

          case "insert" =>
            log.info("insertPath:" + tablePath)
            filterSql
              .write
              .format("delta")
              .mode("append")
              .option("mergeSchema", "true")
              .partitionBy("partition")
              .save(tablePath)

          case "table-drop" =>
            // Soft drop: rename the table directory with a "-drop" suffix.
            val pathSrcPath = new Path(tablePath)
            val pathDstPath = new Path(tablePath + "-drop")
            log.info("pathSrcPath:" + pathSrcPath)
            log.info("pathDstPath:" + pathDstPath)
            fs.rename(pathSrcPath, pathDstPath)

          case "database-drop" =>
            // Soft drop of the whole database directory.
            val dbPath = basePath + deltaDataBase
            fs.rename(new Path(dbPath), new Path(dbPath + "-drop"))

          case other =>
            // Unknown operation types were silently skipped by the old if/else chain;
            // keep that behaviour but make it visible in the log.
            log.info("unsupported operation type: " + other)
        }
      }
    }

    batchDF.unpersist()
  }

  /**
   * Parses raw Kafka messages (binlog-style JSON) into per-record field maps.
   *
   * Each Kafka value is expected to be a JSON object with "type", "database",
   * "table", "primary_key" and (optionally) "data" keys. For every message that
   * carries "data", an Avro record (dataBase/tableName/type/timestamp/primaryKey/value)
   * is built and fed through `dataGen.generateInserts`; the resulting records are
   * converted to JSON strings and flattened into insertion-ordered maps.
   *
   * NOTE(review): key order of the returned maps is relied upon downstream (column
   * naming in upsertToDelta) — that is why OrderedField and LinkedHashMap are used.
   *
   * @param spark active session, needed for its implicit Dataset encoders
   * @param df    DataFrame read from Kafka; only the binary `value` column is used
   * @return Dataset of ordered key->value maps, one per generated record
   */
  def StructuredFromKafka(spark: SparkSession, df: DataFrame) = {

    import spark.implicits._

    // Kafka's value column is binary; cast each row to a UTF-8 string.
    val kafkaDf = df.selectExpr("cast(value as string)").as[(String)]

    val dataList = kafkaDf.flatMap {
      value =>
        log.info(value)
        var listData: List[String] = List()
        // OrderedField preserves JSON key order for the downstream column mapping.
        val jsonObject = JSON.parseObject(value, Feature.OrderedField)
        val types = jsonObject.getOrDefault("type", "null").toString
        val dataBase = jsonObject.getOrDefault("database", "null").toString
        val tableNmae = jsonObject.getOrDefault("table", "null").toString // NOTE(review): "Nmae" typo, local-only name
        val primaryKeyValue = jsonObject.getOrDefault("primary_key", "null").toString

        // Only messages that actually carry row data produce output records.
        if (jsonObject.containsKey("data")) {

          val value = jsonObject.getOrDefault("data", "null").toString

          // Avro schema describing the envelope record built below.
          val TRIP_EXAMPLE_SCHEMA = "{\"type\": \"record\"," + "\"name\": \"triprec\"," + "\"fields\": [ " +
            "{\"name\": \"dataBase\",\"type\": \"string\"}," +
            "{\"name\": \"tableName\",\"type\": \"string\"}," +
            "{\"name\": \"type\",\"type\": \"string\"}," +
            "{\"name\": \"timestamp\",\"type\": \"string\"}," +
            "{\"name\": \"primaryKey\", \"type\": \"string\"}," +
            "{\"name\": \"value\", \"type\": \"string\"}]}"
          val avroSchema = new Schema.Parser().parse(TRIP_EXAMPLE_SCHEMA)
          log.info("avroSchema:" + avroSchema)
          val rec = new GenericData.Record(avroSchema)
          rec.put("timestamp", LocalDateTime.now.toString) // ingestion time, not source event time
          rec.put("primaryKey", primaryKeyValue); // key taken from the message's "primary_key" field
          rec.put("dataBase", dataBase);
          rec.put("tableName", tableNmae);
          rec.put("type", types);
          rec.put("value", value)
          println("rec" + rec)

          // NOTE(review): generateInserts' first argument is ignored per the inline
          // comment; its exact semantics live in the project's DataGens class.
          val allRecords = dataGen.generateInserts(primaryKeyValue /* ignore */ , 1, rec)
          listData = DataSourceTestUtils.convertToStringList(allRecords).toList
        }
        log.info("records:" + listData)

        // Re-parse each generated JSON string into an ordered mutable map,
        // stringifying values and mapping JSON nulls to the string "null".
        val mapJson: List[mutable.Map[String, String]] = listData.filterNot(x => x.isEmpty)
          .map {
            jsonData =>
              log.info("jsonData:" + jsonData)
              val jsonObj = JSON.parseObject(jsonData, Feature.OrderedField)
              val jsonKey = jsonObj.keySet()
              val iter = jsonKey.iterator()
              // LinkedHashMap keeps insertion order — required downstream.
              val mapValue: mutable.Map[String, String] = scala.collection.mutable.LinkedHashMap[String, String]()
              while (iter.hasNext) {
                val instance = iter.next()
                val singleValue = jsonObj.get(instance)
                var values: String = "null"
                if (singleValue != null)
                  values = singleValue.toString
                mapValue.put(instance, values)
                log.info("===key====：" + instance + "===value===：" + values)
              }
              mapValue
          }
        mapJson
    }
    dataList

  }

}
