package com.ibm.cps.spark.streaming

import backtype.storm.tuple.Values
import com.ibm.cps.kafka.KafkaTopicConstructorForMultiTenants
import com.ibm.cps.message.{MessageFactory, MessageFields}
import com.ibm.cps.newmessage.{MetadataFactory, SerializableDataSourceMetadata}
import com.ibm.cps.spark.streaming.adapter.SparkEmbeddedCollector
import com.ibm.util.{LocalConfig, TopologyStreamIds}
import kafka.serializer.{DefaultDecoder, StringDecoder}
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable.ArrayBuffer

/**
 * Spark Streaming entry point for the "normal tenant" topology.
 *
 * Consumes five tenant-scoped Kafka topics (raw data, processor add/delete,
 * datasource add/delete) through a direct Kafka stream, keys every record by
 * its Kafka partition index, and replays each batch into an embedded
 * Storm-style topology held as per-key Spark state (`updateStateByKey`), so
 * each partition keeps one long-lived [[SparkEmbeddedCollector]] across
 * batches.
 *
 * Created by gongxuan on 11/27/15.
 */
object SparkTopologyForNormalTenant {
  val logger = Logger.getLogger(SparkTopologyForNormalTenant.getClass)
  // Never reassigned, so a val; builds the tenant-scoped Kafka topic names.
  private val kafkaTopicConstructorForMultiTenants: KafkaTopicConstructorForMultiTenants =
    new KafkaTopicConstructorForMultiTenants

  /**
   * Launches the streaming job.
   *
   * @param args expected: [tenantid, parallelism, checkpointPath];
   *             only args(2) (the checkpoint path) is read here.
   */
  def main(args: Array[String]) {
    if (args.length != 3) {
      logger.error("Illegal parameter list, the expected should be [tenantid, parallelism, checkpointPath]")
      return
    }

    val normalTent = MessageFields.NORMAL_TENANT
    val checkpointPath = args(2)

    // One tenant-specific topic per message/metadata channel.
    val dataTopic = kafkaTopicConstructorForMultiTenants.getMessageTopic(normalTent)
    val processorTopic = kafkaTopicConstructorForMultiTenants.getMetadataAddTopic(normalTent)
    val processorDeleteTopic = kafkaTopicConstructorForMultiTenants.getMetadataDeleteTopic(normalTent)
    val datasourceAddTopic = kafkaTopicConstructorForMultiTenants.getDataSourceAddTopic(normalTent)
    val datasourceDeleteTopic = kafkaTopicConstructorForMultiTenants.getDataSourceDeleteTopic(normalTent)
    val topicsSet = Set(dataTopic, processorTopic, processorDeleteTopic, datasourceAddTopic, datasourceDeleteTopic)

    val conf = new SparkConf()
      .setAppName("SparkTopologyForNormalTenant")
      .set("spark.shuffle.manager", "SORT")

    // The factory closure runs only when no checkpoint exists at checkpointPath;
    // otherwise the streaming graph is recovered from the checkpoint.
    val ssc = StreamingContext.getOrCreate(checkpointPath, () => {
      val context = new StreamingContext(conf, Seconds(5))
      context.checkpoint(checkpointPath)

      val kafkaParams = Map[String, String](
        "metadata.broker.list" -> LocalConfig.KAFKA_BROKERS,
        "zookeeper.connect" -> LocalConfig.ZOOKEEPER_HOST_PORT,
        "zookeeper.session.timeout.ms" -> "8000",
        "zookeeper.sync.time.ms" -> "200",
        "auto.commit.interval.ms" -> "1000"
      )

      // Key every (topic, payload) record by its partition index so that
      // updateStateByKey keeps one embedded topology per partition. The lazy
      // iterator avoids materializing the whole partition in memory (the
      // original buffered it into an ArrayBuffer first).
      val stream = KafkaUtils.createDirectStream[String, Array[Byte], StringDecoder, DefaultDecoder](
        context, kafkaParams, topicsSet)
        .transform { rdd =>
          rdd.mapPartitionsWithIndex[(Int, (String, Array[Byte]))]((partitionIndex, records) =>
            records.map(record => (partitionIndex, record))
          )
        }

      // State-update function: replay the batch into the (possibly newly
      // created) embedded topology and keep it as this key's state.
      def topologyRunner = (newBatch: Seq[(String, Array[Byte])], embeddedTopology: Option[SparkEmbeddedCollector]) => {
        logger.info("Start onlineProcessor()")
        val embeddedSparkTopology = embeddedTopology.getOrElse(EmbeddedTopologyCreator.create(0))

        // Raw data messages feed the raw-data stream, keyed by their ts key.
        // produceDataMessage may return null for unparseable payloads.
        def processDataMessage(dataBytes: Array[Byte]) = {
          val rawDataMessage = MessageFactory.produceDataMessage(dataBytes)
          if (rawDataMessage != null) {
            embeddedSparkTopology.sendDataToBolt(TopologyStreamIds.RAW_DATA_STREAM,
              new Values(rawDataMessage, rawDataMessage.getTsKey.asInstanceOf[AnyRef]))
          }
        }

        // Processor add/delete share the same decoding; only the stream id differs.
        def processProcessorMetadata(streamId: String, dataBytes: Array[Byte]) = {
          val abstractMetadata = MetadataFactory.parseJsonMetadata(new String(dataBytes))
          embeddedSparkTopology.sendDataToBolt(streamId, new Values(abstractMetadata))
        }

        // Datasource add/delete share the same decoding; only the stream id differs.
        def processDataSourceMetadata(streamId: String, dataBytes: Array[Byte]) = {
          val dataSourceMetadata = new SerializableDataSourceMetadata(new String(dataBytes))
          embeddedSparkTopology.sendDataToBolt(streamId, new Values(dataSourceMetadata))
        }

        for ((topic, dataBytes) <- newBatch) {
          topic match {
            case t if t == dataTopic =>
              processDataMessage(dataBytes)
            case t if t == processorTopic =>
              processProcessorMetadata(TopologyStreamIds.PROCESSOR_SPOUT_STREAM, dataBytes)
            case t if t == processorDeleteTopic =>
              processProcessorMetadata(TopologyStreamIds.PROCESSOR_DELETE_SPOUT_STREAM, dataBytes)
            case t if t == datasourceAddTopic =>
              processDataSourceMetadata(TopologyStreamIds.DATASOURCE_SPOUT_STREAM, dataBytes)
            case _ =>
              // Anything else in topicsSet is the datasource-delete topic
              // (matches the original catch-all else branch).
              processDataSourceMetadata(TopologyStreamIds.DATASOURCE_DELETE_SPOUT_STREAM, dataBytes)
          }
        }
        Some(embeddedSparkTopology)
      }

      val embeddedTopology = stream
        .updateStateByKey[SparkEmbeddedCollector](topologyRunner)
        .checkpoint(Seconds(10))

      // Force evaluation of every batch and log its lineage depth and size.
      // collect() is acceptable: there is at most one state entry per partition.
      embeddedTopology.foreachRDD((rdd, t) => {
        logger.info("~~~~~~~~~~RDD Depth: " + rdd.toDebugString.split("\n").length)
        logger.info("~~~~~~~~~~" + t.toString() + " element count: " + rdd.collect.length)
      })

      context
    })

    ssc.start()
    ssc.awaitTermination()
  }
}
