/*
 * Copyright 2018 Analytics Zoo Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.intel.analytics.zoo.examples.streaming.textclassification

import com.intel.analytics.zoo.common.NNContext
import com.intel.analytics.zoo.feature.text.{TextFeature, TextSet}
import com.intel.analytics.zoo.models.textclassification.TextClassifier
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
import scopt.OptionParser


/**
 * Command-line configuration for the streaming text classification example.
 *
 * @param host           Host for the socket network connection supplying text lines.
 * @param port           Port for the socket network connection.
 * @param indexPath      Path of the word-to-index text file (required on the CLI).
 * @param sequenceLength The length each token sequence is shaped to.
 * @param batchSize      The number of samples per gradient update.
 *                       NOTE(review): declared but not referenced in main below —
 *                       possibly vestigial; confirm before removing.
 * @param partitionNum   The number of partitions to cut the dataset into
 *                       (also passed as batchPerThread to predict).
 * @param model          Path of the pre-trained model (required on the CLI).
 */
case class TextClassificationParams(
  host: String = "localhost",
  port: Int = 9999,
  indexPath: String = "word2index.txt",
  sequenceLength: Int = 500,
  batchSize: Int = 128,
  partitionNum: Int = 4,
  model: String = "")

/**
 * Streaming text classification example: reads '\n'-delimited text lines from a
 * socket (e.g. produced by 'nc'), turns each micro-batch into a TextSet,
 * preprocesses it, and prints predictions from a pre-trained TextClassifier.
 */
object StreamingTextClassification {

  def main(args: Array[String]): Unit = {
    // CLI parser; indexPath and model are mandatory, everything else has defaults.
    val parser = new OptionParser[TextClassificationParams]("Streaming Text Classification") {
      opt[String]('h', "host")
        .text("host for network connection")
        .action((x, c) => c.copy(host = x))
      opt[Int]('p', "port")
        .text("Port for network connection")
        .action((x, c) => c.copy(port = x))
      opt[String]("indexPath")
        .text("Path of word to index text file")
        .action((x, c) => c.copy(indexPath = x))
        .required()
      opt[Int]("partitionNum")
        .text("The number of partitions to cut the dataset into")
        .action((x, c) => c.copy(partitionNum = x))
      opt[Int]("sequenceLength")
        .text("The length of each sequence")
        .action((x, c) => c.copy(sequenceLength = x))
      opt[Int]('b', "batchSize")
        .text("The number of samples per gradient update")
        .action((x, c) => c.copy(batchSize = x))
      opt[String]('m', "model")
        .text("Path of pre-trained Model")
        .action((x, c) => c.copy(model = x))
        .required()
    }

    // foreach (not map): we only run side effects; nothing is done with a result.
    parser.parse(args, TextClassificationParams()).foreach { param =>
      val sc = NNContext.initNNContext("Analytics Zoo Streaming Text Classification")
      // 3-second micro-batch interval.
      val ssc = new StreamingContext(sc, Seconds(3))

      // Load the pre-trained model once, before the stream starts.
      val model = TextClassifier.loadModel[Float](param.model)
      // Create a socket stream on target ip:port and count the
      // words in input stream of \n delimited text (eg. generated by 'nc')
      // Note that no duplication in storage level only for running locally.
      // Replication necessary in distributed scenario for fault tolerance.
      val lines = ssc.socketTextStream(param.host,
        param.port, StorageLevel.MEMORY_AND_DISK_SER)

      lines.foreachRDD { lineRdd =>
        // Cheap emptiness check: skip batches that produced no partitions.
        if (lineRdd.partitions.nonEmpty) {
          // RDD[String] -> RDD[TextFeature]
          val textFeature = lineRdd.map(x => TextFeature(x))
          // RDD[TextFeature] -> TextSet
          val dataSet = TextSet.rdd(textFeature)
          dataSet.loadWordIndex(param.indexPath)
          // Pre-processing: tokenize, normalize, map words to indices,
          // pad/trim to sequenceLength, and wrap into Samples.
          val transformed = dataSet.tokenize().normalize()
            .word2idx()
            .shapeSequence(param.sequenceLength).generateSample()
          val predictSet = model.predict(transformed,
            batchPerThread = param.partitionNum)
          // Print predictions for (at most) the first 5 records of this batch.
          predictSet.toDistributed()
            .rdd.take(5)
            .map(_.getPredict.toTensor)
            .foreach(println)
        }
      }
      ssc.start()
      ssc.awaitTermination()
    }
  }
}
