//package com.ibm.cps.spark.streaming
// NOTE(review): this entire file is commented out (dead code) and invisible to the compiler.
// Either restore it under version control or delete it — if restoring, first fix the latent
// defects it carries: the logger is built from SparkTopologyForNormalTenant.getClass instead of
// this object's class; "normalTent" is presumably a typo for "normalTenant"; and the
// topologyRunner signature passed to updateStateByKey does not match the
// (Seq[V], Option[S]) => Option[S] shape Spark's updateStateByKey expects — confirm before reuse.
//
//import backtype.storm.tuple.Values
//import com.ibm.cps.kafka.KafkaTopicConstructorForMultiTenants
//import com.ibm.cps.message.MessageFactory
//import com.ibm.cps.newmessage.{MetadataFactory, SerializableDataSourceMetadata}
//import com.ibm.cps.spark.streaming.adapter.SparkEmbeddedCollector
//import com.ibm.util.{LocalConfig, TopologyStreamIds}
//import kafka.serializer.{DefaultDecoder, StringDecoder}
//import org.apache.log4j.Logger
//import org.apache.spark.SparkConf
//import org.apache.spark.streaming.kafka.KafkaUtils
//import org.apache.spark.streaming.{Seconds, StreamingContext}
//
//import scala.collection.mutable.ArrayBuffer
//
//
///**
// * Created by gongxuan on 11/27/15.
// */
//object TestSparkForNormalTenant {
//  val logger = Logger.getLogger(SparkTopologyForNormalTenant.getClass)
//  private var kafkaTopicConstructorForMultiTenants: KafkaTopicConstructorForMultiTenants = new KafkaTopicConstructorForMultiTenants
//
//  def main(args: Array[String]) {
//    if(args.length != 3){
//      logger.error("Illegal parameter list, the expected should be [tenantid, parallelism, checkpointPath]")
//      return
//    }
//    val normalTent = args(0)
//    val checkpointPath = args(2)
//    //    val normalTent = "normalTenant1"
//    val dataTopic = kafkaTopicConstructorForMultiTenants.getMessageTopic(normalTent)
//    val processorTopic = kafkaTopicConstructorForMultiTenants.getMetadataAddTopic(normalTent)
//    val processorDeleteTopic = kafkaTopicConstructorForMultiTenants.getMetadataDeleteTopic(normalTent)
//    val datasourceAddTopic = kafkaTopicConstructorForMultiTenants.getDataSourceAddTopic(normalTent)
//    val datasourceDeleteTopic = kafkaTopicConstructorForMultiTenants.getDataSourceDeleteTopic(normalTent)
//    val topicsSet = Set(dataTopic,processorTopic,processorDeleteTopic,datasourceAddTopic,datasourceDeleteTopic)
//    println(dataTopic)
//    println(processorTopic)
//    println(processorDeleteTopic)
//    println(datasourceAddTopic)
//    println(datasourceDeleteTopic)
//    //    val topiciterator = topicArray.iterator
//    //    val dataTopic = topiciterator.next()
//    //    val processorTopic = topiciterator.next()
//    //    val processorDeleteTopic = topiciterator.next()
//    //    val datasourceAddTopic = topiciterator.next()
//    //    val datasourceDeleteTopic = topiciterator.next()
//
//    val conf = new SparkConf().setAppName("SparkTopologyForNormalTenant")
//      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
//      .setMaster("local[5]")
//      //      .set("spark.local.dir", "./spark-local")
//      //      .set("spark.eventLog.enabled", "true")
//      //      .set("spark.eventLog.dir", "./spark-events")
//      .set("spark.shuffle.manager", "SORT")
//
//    //    val topics: Map[String, Int]
//    //    = Map(
//    //      dataTopic -> 1,
//    //      processorTopic -> 1,
//    //      processorDeleteTopic -> 1,
//    //      datasourceAddTopic -> 1,
//    //      datasourceDeleteTopic -> 1
//    //    )
//    val ssc = StreamingContext.getOrCreate(checkpointPath, () => {
//      val context = new StreamingContext(conf, Seconds(5))
//      context.checkpoint(checkpointPath)
//
//      val kafkaParams = Map[String, String](
//        "metadata.broker.list" -> LocalConfig.KAFKA_BROKERS,
//        "zookeeper.connect" -> LocalConfig.ZOOKEEPER_HOST_PORT,
//        "zookeeper.session.timeout.ms" -> "8000",
//        "zookeeper.sync.time.ms" -> "200",
//        "auto.commit.interval.ms" -> "1000"
//      )
//      //    var offsetRanges = Array[OffsetRange]()
//      val stream = KafkaUtils.createDirectStream[String, Array[Byte], StringDecoder, DefaultDecoder](
//        context, kafkaParams, topicsSet)
//        //        .transform { rdd =>
//        //        offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
//        //        rdd
//        //      }
//        .transform{ rdd =>
//        rdd.mapPartitionsWithIndex[(Int,(String,Array[Byte]))]((i,message)=>
//        {
//          var array = ArrayBuffer[(Int,(String,Array[Byte]))]()
//          while(message.hasNext)
//          {
//            array += ((i,message.next()))
//          }
//          array.toIterator
//        }
//        )
//      }
//      //      context.union(streams).print()
//
//      def topologyRunner = (newBatch: Seq[(String, Array[Byte])],embeddedTopologyMap : scala.collection.mutable.Map[String,Option[SparkEmbeddedCollector]]) => {
//        logger.info("Start onlineProcessor()")
//
//        for(message <- newBatch){
//          logger.info("tenentid:"+MessageFactory.loadtenantId(message._1))
//          val messageTenantId:String = MessageFactory.loadtenantId(message._1)
//          val tenantids = embeddedTopologyMap.keySet
//          var embeddedTopology:Option[SparkEmbeddedCollector] = null
//          val embeddedSparkTopology : SparkEmbeddedCollector = {
//            if(tenantids.contains(messageTenantId)){
//
//            }else{
//              embeddedTopology = embeddedTopologyMap.get(messageTenantId).asInstanceOf[Option[SparkEmbeddedCollector]]
//
//              embeddedTopologyMap += messageTenantId->embeddedTopology
//              embeddedTopology.getOrElse(EmbeddedTopologyCreator.create(messageTenantId, 0))
//            }
//          }
//          //          if(tenantids.contains(messageTenantId)){
//          //            embeddedTopology = embeddedTopologyMap.get(messageTenantId).asInstanceOf[Option[SparkEmbeddedCollector]]
//          //            val embeddedSparkTopology  = embeddedTopology.getOrElse(EmbeddedTopologyCreator.create(messageTenantId, 0))
//          //          }else{
//          //            embeddedTopologyMap += messageTenantId->embeddedTopology
//          //            val embeddedSparkTopology  = embeddedTopology.getOrElse(EmbeddedTopologyCreator.create(messageTenantId, 0))
//          //          }
//
//          def processDataMessage(dataBytes : Array[Byte]) = {
//            val rawDataMessage = MessageFactory.produceDataMessage(dataBytes)
//            if(rawDataMessage != null){
//              embeddedSparkTopology.sendDataToBolt(TopologyStreamIds.RAW_DATA_STREAM, new Values(rawDataMessage, rawDataMessage.getTsKey))
//            }
//          }
//
//          def processAddProcessor(dataBytes : Array[Byte]) = {
//            val str = new String(dataBytes)
//            val abstractMetadata = MetadataFactory.parseJsonMetadata(str);
//            embeddedSparkTopology.sendDataToBolt(TopologyStreamIds.PROCESSOR_SPOUT_STREAM, new Values(abstractMetadata))
//          }
//
//          def processDeleteProcessor(dataBytes : Array[Byte]) = {
//            val str = new String(dataBytes)
//            val abstractMetadata = MetadataFactory.parseJsonMetadata(str);
//            embeddedSparkTopology.sendDataToBolt(TopologyStreamIds.PROCESSOR_DELETE_SPOUT_STREAM, new Values(abstractMetadata))
//          }
//
//          def processAddDataSource(dataBytes : Array[Byte]) = {
//            val str = new String(dataBytes)
//            val dataSourceMetadata = new SerializableDataSourceMetadata(str);
//            embeddedSparkTopology.sendDataToBolt(TopologyStreamIds.DATASOURCE_SPOUT_STREAM, new Values(dataSourceMetadata))
//          }
//
//          def processDeleteDataSource(dataBytes : Array[Byte]) = {
//            val str = new String(dataBytes)
//            val dataSourceMetadata = new SerializableDataSourceMetadata(str);
//            embeddedSparkTopology.sendDataToBolt(TopologyStreamIds.DATASOURCE_DELETE_SPOUT_STREAM, new Values(dataSourceMetadata))
//          }
//
//          val topic = message._1
//          val dataBytes = message._2
//          if(topic.equals(dataTopic)){
//            processDataMessage(dataBytes)
//          }else if(topic.equals(processorTopic)){
//            processAddProcessor(dataBytes)
//          }else if(topic.equals(processorDeleteTopic)){
//            processDeleteProcessor(dataBytes)
//          }else if(topic.equals(datasourceAddTopic)){
//            processAddDataSource(dataBytes)
//          }else{
//            processDeleteDataSource(dataBytes)
//          }
//
//        }
//        Some(embeddedSparkTopology)
//      }
//
//      //      streams.foreach(stream => {
//      //        stream.updateStateByKey(onlineProcessor).print()
//      //      })
//
//      val embeddedTopology = stream
//        .updateStateByKey[SparkEmbeddedCollector](topologyRunner)
//        .checkpoint((Seconds(10)))
//      //        .map{case (taskId, topology) =>
//      //        topology.sendDataToBolt(TopologyStreamIds.SPARK_BATCH_ENDING, new Values("Spark Batch Ended"))
//      //      }
//      embeddedTopology
//        .foreachRDD((rdd, t) => {
//        logger.info("~~~~~~~~~~RDD Depth: " + rdd.toDebugString.split("\n").length)
//        logger.info("~~~~~~~~~~" + t.toString() + " element count: " + rdd.collect.length)
//      })
//
//      //      val finalWorkaroundStream = embeddedTopology.map{case (tenantId, topology) => (tenantId, topology)}.checkpoint(Seconds(30))
//      //      finalWorkaroundStream.foreachRDD(rdd => logger.info(rdd.count()))
//
//      //      embeddedTopology.foreachRDD{
//      //        _.foreachPartition{_ => ()}
//      //      }
//
//      //      embeddedTopology.print()
//      context
//    })
//
//    ssc.start()
//    ssc.awaitTermination()
//  }
//}