
package com.ibm.cps.spark.streaming

import backtype.storm.tuple.Values
import com.ibm.cps.kafka.KafkaTopicConstructorForMultiTenants
import com.ibm.cps.message.MessageFactory
import com.ibm.cps.newmessage.{MetadataFactory, SerializableDataSourceMetadata}
import com.ibm.cps.spark.streaming.adapter.SparkEmbeddedCollector
import com.ibm.util.{LocalConfig, TopologyStreamIds}
import kafka.serializer.{DefaultDecoder, StringDecoder}
import org.apache.spark.SparkConf
import org.apache.spark.api.java.StorageLevels
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Created by telekinesis on 5/4/15.
 */
object SparkTopologyWithoutCheckpoint {

  // Stateless per-tenant topic-name builder. Never reassigned, so `val`
  // (the original `var` served no purpose).
  private val kafkaTopicConstructorForMultiTenants: KafkaTopicConstructorForMultiTenants =
    new KafkaTopicConstructorForMultiTenants

  /**
   * Entry point. Expects exactly two arguments: `[tenantid, parallelism]`.
   * Validates them and hands off to [[runTopology]]; prints a usage error
   * to stderr and exits normally on bad input.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      System.err.println("Illegal parameter list, the expected should be [tenantid, parallelism]")
      return
    }

    val tenantid = args(0)
    // Fail with a readable message instead of an uncaught NumberFormatException.
    scala.util.Try(args(1).toInt).toOption match {
      case Some(parallelism) if parallelism > 0 =>
        runTopology(tenantid, parallelism)
      case _ =>
        System.err.println(s"parallelism must be a positive integer, got: ${args(1)}")
    }
  }

  /**
   * Builds and runs the streaming job for one tenant:
   *   1. resolves the five per-tenant Kafka topics (raw data, processor
   *      add/delete, datasource add/delete),
   *   2. creates `parallelism` Kafka receiver streams, each message keyed by
   *      its receiver index,
   *   3. replays every message into an embedded collector kept as per-key
   *      streaming state via `updateStateByKey`.
   *
   * NOTE(review): despite the object name "WithoutCheckpoint", checkpointing
   * is enabled below — Spark Streaming requires a checkpoint directory for
   * `updateStateByKey`, so it cannot simply be removed.
   */
  private def runTopology(tenantid: String, parallelism: Int): Unit = {
    val dataTopic             = kafkaTopicConstructorForMultiTenants.getMessageTopic(tenantid)
    val processorTopic        = kafkaTopicConstructorForMultiTenants.getMetadataAddTopic(tenantid)
    val processorDeleteTopic  = kafkaTopicConstructorForMultiTenants.getMetadataDeleteTopic(tenantid)
    val datasourceAddTopic    = kafkaTopicConstructorForMultiTenants.getDataSourceAddTopic(tenantid)
    val datasourceDeleteTopic = kafkaTopicConstructorForMultiTenants.getDataSourceDeleteTopic(tenantid)

    val conf = new SparkConf().setAppName("SparkTopology")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.shuffle.manager", "SORT")

    val ssc = new StreamingContext(conf, Seconds(10))
    // Mandatory for updateStateByKey; see the naming caveat in the scaladoc above.
    ssc.checkpoint("checkpoint")

    // One receiver thread per topic within each Kafka stream.
    val topics: Map[String, Int] = Map(
      dataTopic -> 1,
      processorTopic -> 1,
      processorDeleteTopic -> 1,
      datasourceAddTopic -> 1,
      datasourceDeleteTopic -> 1
    )

    val streams = (1 to parallelism).map { i =>
      // NOTE(review): each receiver uses its own consumer group (group.id = i),
      // so every receiver consumes ALL messages instead of sharing partitions.
      // Confirm this duplication is intended before raising parallelism above 1.
      val params = Map(
        "zookeeper.connect" -> LocalConfig.ZOOKEEPER_HOST_PORT,
        "group.id" -> i.toString,
        "zookeeper.session.timeout.ms" -> "4000",
        "zookeeper.sync.time.ms" -> "200",
        "auto.commit.interval.ms" -> "1000"
      )
      // Key each message by its receiver index so updateStateByKey maintains
      // one embedded collector per receiver.
      KafkaStreamConnector
        .createStream[String, Array[Byte], StringDecoder, DefaultDecoder](ssc, params, topics, StorageLevels.MEMORY_AND_DISK)
        .map[(Int, (String, Array[Byte]))](message => (i, message))
    }

    /**
     * State-update function for updateStateByKey: lazily creates the embedded
     * collector on first batch for a key, then forwards each (topic, payload)
     * pair to the matching bolt stream. Always returns Some(collector) so the
     * state is never evicted.
     */
    def topologyRunner(newBatch: Seq[(String, Array[Byte])],
                       embeddedTopology: Option[SparkEmbeddedCollector]): Option[SparkEmbeddedCollector] = {
      println("Start onlineProcessor()")
      val collector: SparkEmbeddedCollector =
        embeddedTopology.getOrElse(EmbeddedTopologyCreator.create(tenantid, 0))

      for ((topic, dataBytes) <- newBatch) {
        // NOTE(review): `new String(dataBytes)` uses the platform default
        // charset — confirm the producers write UTF-8 on a matching platform.
        topic match {
          case `dataTopic` =>
            // Unparseable payloads come back as null and are silently dropped.
            val rawDataMessage = MessageFactory.produceDataMessage(dataBytes)
            if (rawDataMessage != null) {
              collector.sendDataToBolt(TopologyStreamIds.RAW_DATA_STREAM,
                new Values(rawDataMessage, rawDataMessage.getTsKey.asInstanceOf[AnyRef]))
            }
          case `processorTopic` =>
            collector.sendDataToBolt(TopologyStreamIds.PROCESSOR_SPOUT_STREAM,
              new Values(MetadataFactory.parseJsonMetadata(new String(dataBytes))))
          case `processorDeleteTopic` =>
            collector.sendDataToBolt(TopologyStreamIds.PROCESSOR_DELETE_SPOUT_STREAM,
              new Values(MetadataFactory.parseJsonMetadata(new String(dataBytes))))
          case `datasourceAddTopic` =>
            collector.sendDataToBolt(TopologyStreamIds.DATASOURCE_SPOUT_STREAM,
              new Values(new SerializableDataSourceMetadata(new String(dataBytes))))
          case _ =>
            // Catch-all delete branch, matching the original trailing `else`:
            // any topic not matched above is treated as a datasource delete.
            collector.sendDataToBolt(TopologyStreamIds.DATASOURCE_DELETE_SPOUT_STREAM,
              new Values(new SerializableDataSourceMetadata(new String(dataBytes))))
        }
      }
      Some(collector)
    }

    val embeddedTopology = ssc.union(streams).updateStateByKey[SparkEmbeddedCollector](topologyRunner)
    embeddedTopology.print()

    ssc.start()
    ssc.awaitTermination()
  }
}

