
package com.ibm.cps.spark.streaming

import com.ibm.cps.kafka.KafkaTopicConstructorForMultiTenants
import com.ibm.cps.message.MessageFactory
import com.ibm.cps.newmessage.{MetadataFactory, SerializableDataSourceMetadata}
import com.ibm.cps.spark.streaming.adapter.SparkEmbeddedCollector
import com.ibm.util.{LocalConfig, TopologyStreamIds}
import kafka.serializer.{DefaultDecoder, StringDecoder}
import org.apache.spark.SparkConf
import org.apache.spark.api.java.StorageLevels
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming entry point for a single tenant: subscribes to the tenant's
 * Kafka topics (raw data plus processor / datasource add & delete metadata),
 * unions `parallelism` receiver streams, tags every message with the stream id
 * matching its source topic, and feeds the tagged stream into a keyed state
 * via `updateStateByKey`.
 *
 * Usage: SparkTopologyV2 &lt;tenantid&gt; &lt;parallelism&gt;
 *
 * Created by telekinesis on 5/4/15.
 */
object SparkTopologyV2 {
  // Stateless per-tenant topic-name factory; never reassigned, so a val.
  private val kafkaTopicConstructorForMultiTenants: KafkaTopicConstructorForMultiTenants =
    new KafkaTopicConstructorForMultiTenants

  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      System.err.println("Illegal parameter list, the expected should be [tenantid, parallelism]")
      return
    }

    val tenantid = args(0)
    // Fail fast with a readable message instead of an unhandled
    // NumberFormatException when the second argument is not an integer.
    val parallelism = scala.util.Try(args(1).toInt) match {
      case scala.util.Success(p) => p
      case scala.util.Failure(_) =>
        System.err.println("parallelism must be an integer, got: " + args(1))
        return
    }

    // Per-tenant Kafka topic names.
    val dataTopic             = kafkaTopicConstructorForMultiTenants.getMessageTopic(tenantid)
    val processorTopic        = kafkaTopicConstructorForMultiTenants.getMetadataAddTopic(tenantid)
    val processorDeleteTopic  = kafkaTopicConstructorForMultiTenants.getMetadataDeleteTopic(tenantid)
    val datasourceAddTopic    = kafkaTopicConstructorForMultiTenants.getDataSourceAddTopic(tenantid)
    val datasourceDeleteTopic = kafkaTopicConstructorForMultiTenants.getDataSourceDeleteTopic(tenantid)

    val conf = new SparkConf().setAppName("SparkTopology")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .setMaster("local[2]")
      .set("spark.local.dir", "./spark-local")
      .set("spark.eventLog.enabled", "true")
      .set("spark.eventLog.dir", "./spark-events")
      .set("spark.shuffle.manager", "SORT")

    // 10-second micro-batches; a checkpoint directory is mandatory for
    // updateStateByKey below.
    val ssc = new StreamingContext(conf, Seconds(10))
    ssc.checkpoint("./checkpoint")

    // One Kafka consumer thread per topic.
    val topics: Map[String, Int] = Map(
      dataTopic -> 1,
      processorTopic -> 1,
      processorDeleteTopic -> 1,
      datasourceAddTopic -> 1,
      datasourceDeleteTopic -> 1
    )

    // Each receiver stream uses its own consumer group (group.id = task index)
    // so every stream independently sees all messages. Hoisted out of the loop
    // below — the original redefined this function on every iteration.
    def createKafkaParams(taskId: Int): Map[String, String] = Map(
      "zookeeper.connect" -> LocalConfig.ZOOKEEPER_HOST_PORT,
      "group.id" -> taskId.toString,
      "zookeeper.session.timeout.ms" -> "4000",
      "zookeeper.sync.time.ms" -> "200",
      "auto.commit.interval.ms" -> "1000"
    )

    val streams = (1 to parallelism).map { i =>
      KafkaStreamConnector.createStream[String, Array[Byte], StringDecoder, DefaultDecoder](
        ssc, createKafkaParams(i), topics, StorageLevels.MEMORY_AND_DISK)
    }

    // Every message is mapped onto this single key so that updateStateByKey
    // funnels each whole batch through one shared state.
    val FAKE_KEY_FOR_STATE_UPDATE = 1

    val dataDStream = ssc.union(streams).map[(Int, (String, Object))] { message =>
      // message is (topic, payload bytes) from the Kafka stream.
      val (topic, dataBytes) = message

      def processDataMessage(bytes: Array[Byte]): (Int, (String, Object)) = {
        // Parsed for validation; the parsed object is not yet forwarded — the
        // real wiring is still disabled (see commented-out line below).
        val rawDataMessage = MessageFactory.produceDataMessage(bytes)
        // (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.MESSAGE_SPOUT_STREAM, rawDataMessage))
        (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.RAW_DATA_STREAM, "rawDataMessage"))
      }

      def processAddProcessor(bytes: Array[Byte]): (Int, (String, Object)) = {
        // Parsed for validation only; placeholder string payload for now.
        val abstractMetadata = MetadataFactory.parseJsonMetadata(new String(bytes))
        // (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.PROCESSOR_SPOUT_STREAM, abstractMetadata))
        (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.PROCESSOR_SPOUT_STREAM, "abstractMetadata"))
      }

      def processDeleteProcessor(bytes: Array[Byte]): (Int, (String, Object)) = {
        // Parsed for validation only; placeholder string payload for now.
        val abstractMetadata = MetadataFactory.parseJsonMetadata(new String(bytes))
        // (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.PROCESSOR_DELETE_SPOUT_STREAM, abstractMetadata))
        (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.PROCESSOR_DELETE_SPOUT_STREAM, "abstractMetadata"))
      }

      def processAddDataSource(bytes: Array[Byte]): (Int, (String, Object)) = {
        // Constructed for validation only; placeholder string payload for now.
        val dataSourceMetadata = new SerializableDataSourceMetadata(new String(bytes))
        // (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.DATASOURCE_SPOUT_STREAM, dataSourceMetadata))
        (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.DATASOURCE_SPOUT_STREAM, "dataSourceMetadata"))
      }

      def processDeleteDataSource(bytes: Array[Byte]): (Int, (String, Object)) = {
        // Constructed for validation only; placeholder string payload for now.
        val dataSourceMetadata = new SerializableDataSourceMetadata(new String(bytes))
        // (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.DATASOURCE_DELETE_SPOUT_STREAM, dataSourceMetadata))
        (FAKE_KEY_FOR_STATE_UPDATE, (TopologyStreamIds.DATASOURCE_DELETE_SPOUT_STREAM, "dataSourceMetadata"))
      }

      // Route by source topic; backticked patterns match the stable topic vals.
      topic match {
        case `dataTopic`            => processDataMessage(dataBytes)
        case `processorTopic`       => processAddProcessor(dataBytes)
        case `processorDeleteTopic` => processDeleteProcessor(dataBytes)
        case `datasourceAddTopic`   => processAddDataSource(dataBytes)
        // Anything else is treated as a datasource delete, matching the
        // original if/else fall-through.
        case _                      => processDeleteDataSource(dataBytes)
      }
    }

    // State-update function: currently only logs each (streamId, value) pair
    // and keeps no state (always returns None). Forwarding into an embedded
    // topology is disabled — see the commented-out lines.
    val onlineProcessor: (Seq[(String, Object)], Option[SparkEmbeddedCollector]) => Option[SparkEmbeddedCollector] =
      (newBatch, embeddedTopology) => {
        println("Start onlineProcessor()")
        // val embeddedSparkTopology: SparkEmbeddedCollector = embeddedTopology.getOrElse(EmbeddedTopologyCreator.create(tenantid, 0))
        for ((streamId, values) <- newBatch) {
          println("Inside onlineProcessor, StreamId: " + streamId + " value: " + values)
          // embeddedSparkTopology.sendDataToBolt(streamId, values)
        }
        // Some(embeddedSparkTopology)
        None
      }

    val embeddedTopology = dataDStream.updateStateByKey[SparkEmbeddedCollector](onlineProcessor)
    // print() forces evaluation of the state stream each batch.
    embeddedTopology.print()

    ssc.start()
    ssc.awaitTermination()
  }
}

