package com.edata.bigdata.kafka

import com.edata.bigdata.annotation.Edata_Executor
import com.edata.bigdata.util.{EDataUtils, Executor}
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.util.Properties
@Edata_Executor(target = "KFPROEXECUTOR")
/**
 * Spark executor that reads/writes text data on a filesystem path and can
 * broadcast a Kafka producer sink to stream RDD records to a Kafka topic.
 *
 * Lifecycle: call `initialize()` once to populate the Kafka producer
 * properties, then `sendData` (which lazily creates the broadcast producer).
 *
 * @tparam K Kafka record key type
 * @tparam V Kafka record value type
 */
class SparkKafkaProducer[K, V] extends Executor {

  override var SESSION: SparkSession = _
  // Kafka producer configuration; populated by initialize().
  private val kafkaParam: Properties = new Properties()
  // Broadcast producer sink, created lazily via double-checked locking
  // (@volatile read + synchronized write) so only one broadcast is made.
  @volatile private var producer: Broadcast[KafkaProducerSinks[K, V]] = null

  /**
   * Populates the Kafka producer properties: bootstrap servers (from the
   * Executor-provided KF_PRO_BOOTSTRAP) and String key/value serializers.
   * Must run before the producer is created.
   */
  override def initialize(): Unit = {
    kafkaParam.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KF_PRO_BOOTSTRAP)
    kafkaParam.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    kafkaParam.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
  }

  // Guard for varargs-based entry points: the original code only checked
  // isEmpty and then read args(1), which threw IndexOutOfBoundsException
  // for a single-element args instead of a descriptive error.
  private def requireArgs(args: Seq[String], required: Int): Unit = {
    if (args.length < required) {
      throw new Exception(s"args parameter should contain at least ${required} element(s), got ${args.length}")
    }
  }

  /**
   * Loads the text file at `path` and converts it to a DataFrame using
   * `seperator` as the field delimiter (delegates to createDataFrame).
   */
  override def findDataFromPath[T](path: String, seperator: String)(implicit bean: Manifest[T]): DataFrame = {
    val rdd = SESSION.sparkContext.textFile(path)
    LOGGER.info(s"finding data from ${path}")
    createDataFrame(rdd, seperator)
  }

  /** Saves the RDD of lines as a text file at `path`. */
  override def saveDataToPath[T](data: RDD[String], path: String)(implicit bean: Manifest[T]): Unit = {
    LOGGER.info(s"saving data to ${path}")
    data.saveAsTextFile(path)
  }

  /**
   * Varargs wrapper around findDataFromPath.
   *
   * @param args args(0) = path, args(1) = separator (both required)
   */
  override def findData[T](args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2)
    findDataFromPath(args(0), args(1))
  }

  /**
   * Varargs wrapper that saves `data` as a text file.
   *
   * @param args args(0) = output path (required)
   */
  override def saveData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    requireArgs(args, 1)
    data.saveAsTextFile(args(0))
  }

  /** Deprecated no-op: text files on a path cannot be updated in place. */
  override def updateData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    LOGGER.warn(s"This method is deprecated")
  }

  /**
   * Loads data from a path and filters rows whose id column matches `value`.
   * The id column name and type come from the bean's registered schema
   * (find_SCHEMA), with the id descriptor encoded as "name:type".
   *
   * @param args args(0) = path, args(1) = separator (both required)
   */
  override def findDataById[T](value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2)
    val className = bean.toString()
    val (_, id, _) = find_SCHEMA(className)
    val k_t = id.split(":")
    val data = findDataFromPath(args(0), args(1))
    data.filter(id + "==" + EDataUtils.convertVToQueryStrByDataType(k_t(0), k_t(1)))
  }

  /**
   * Loads data from a path and filters rows where column `key` equals `value`.
   *
   * @param args args(0) = path, args(1) = separator (both required)
   */
  override def findDataByProperty[T](key: String, value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    requireArgs(args, 2)
    val data = findDataFromPath(args(0), args(1))
    data.filter(s"${key}=${value}")
  }

  /**
   * Creates and broadcasts the Kafka producer sink exactly once.
   * Safe to call repeatedly: double-checked locking on the @volatile
   * `producer` field makes subsequent calls no-ops.
   */
  def createProducer(): Unit = {
    if (producer == null) {
      val sc = SESSION.sparkContext
      synchronized {
        if (producer == null) {
          producer = sc.broadcast(KafkaProducerSinks[K, V](kafkaParam))
        }
      }
    }
  }

  /**
   * Sends every record of `rdd` to the configured topic (KF_PRO_TOPIC).
   * Ensures the broadcast producer exists first — the original code
   * dereferenced `producer.value` and threw a NullPointerException if
   * createProducer() had never been called.
   */
  def sendData(rdd: RDD[V]): Unit = {
    createProducer()
    val prod = producer.value
    rdd.foreach(record => prod.send(KF_PRO_TOPIC, record))
  }

}
