package demo.spark.stream

import java.io.{File, FileWriter, PrintWriter}
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import java.util.{Collections, Date, Timer, TimerTask, UUID}
import java.util.stream.Collectors

import demo.spark.hurypoint.logpaser.SeqAccumulator
import demo.spark.stream.format.{FileOffsetBytesFormat, LengthUnfixedBytesWritable, PathAndOffsetWritable}
import demo.spark.utils.SparkSql
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.{FixedLengthInputFormat, TextInputFormat}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Duration, Seconds, StreamingContext}

import scala.collection.mutable.Queue


/**
 * Spark Streaming 意在处理实时产生的数据流，在时间窗口期捕捉流入的数据进行批处理，
 * 提供了DSteam流式处理的高级抽象，窗口期对内进行数据的批获取批处理，对外暴露RDD进行实时聚合.
 */
object StreamingDemo {

  def main(args: Array[String]): Unit = {
    /* Collect data from TCP; no byte framing required. */
    //acceptBatchDataFromTCP();

    /* Collect data from files: on change, process only the newly
    appended line content. */
    //acceptTextDataFromFileSystem(monitorPeriod = 10);

    /* Collect data from files: on change, re-read the whole file from
    seek(0) to tail, split into fixed byte blocks sized by the
    "bytesFetchLen" argument. */
    //acceptStreamDataFromFileSystemByFixedSegment(5,monitorPeriod = 5);

    /* Collect data from monitored files: once a file changes, the whole
    file is read and parsed in variable-length segment blocks, each at
    most 'maxSegmentLen' bytes long. */
    //acceptStreamDataFromFileSystemByUnFixedSegment(5,monitorPeriod = 10,charSet = "utf-8");

    /* Collect data from a queue; watch the data flowing into the window each period. */
    //acceptStreamDataFromQueueAndDealerInWindow(monitorPeriod = 3, windowLen = 9, slideLen = 3);

    /* Try resuming spark streaming's work from a checkpoint — known to fail. */
    testCheckPointContinueWorking(
      monitorPeriod = 1,
      windowLen = 3,
      slideLen = 1,
      monitorFileDir = StreamTestRunner.CHECK_POINT_MONITOR_FILE,
      checkPointPath = StreamTestRunner.CHECK_POINT_PATH
    )
  }

  /**
   * Attempts to use Spark Streaming's checkpoint mechanism (metadata +
   * data checkpointing) to resume a windowed aggregation after restart.
   * NOTE(original author): recovery did not actually work in practice.
   *
   * A background timer appends three "(time,random)" tuples per second to
   * `monitorFileDir`; the stream monitors that file's parent directory,
   * parses the tuples and keeps a sliding-window sum keyed by timestamp.
   *
   * @param monitorPeriod  DStream batch interval in seconds.
   * @param windowLen      window length in seconds (multiple of monitorPeriod).
   * @param slideLen       slide interval in seconds (multiple of monitorPeriod).
   * @param monitorFileDir file the timer writes generated tuples into.
   * @param checkPointPath directory for the streaming checkpoints.
   */
  def testCheckPointContinueWorking(monitorPeriod: Int,
           windowLen: Int,
           slideLen: Int,
           monitorFileDir: String = StreamTestRunner.CHECK_POINT_MONITOR_FILE,
           checkPointPath: String = StreamTestRunner.CHECK_POINT_PATH): Unit = {
    // a checkpoint location is mandatory for this demo.
    require(checkPointPath != null, "required check point is necessary.")

    // 0. factory used by getOrCreate when no checkpoint exists yet.
    def getStreamContextWithCheckPoint(): StreamingContext = {
      val sparkConf: SparkConf = new SparkConf()
        .setMaster("local[*]")
        .setAppName("StreamingDemo");
      val streamContext: StreamingContext = new StreamingContext(sparkConf, Duration.apply(monitorPeriod * 1000))
      // streaming checkpoint store dir, for metadata and data checkpoints.
      streamContext.checkpoint(checkPointPath)
      streamContext
    }

    // 1. get or create stream context with check point.
    val streamContext = StreamingContext.getOrCreate(checkPointPath, getStreamContextWithCheckPoint _)
    // 2. SparkContext.
    val sparkContext: SparkContext = streamContext.sparkContext;
    // 3. create SparkSession on the same conf.
    val sparkSession: SparkSession = SparkSession.builder().config(sparkContext.getConf).getOrCreate();
    // 4. log level "WARN" to keep console output readable.
    sparkContext.setLogLevel("WARN")

    // 5. broadcast the separator printed after every slide's report.
    val report_terminated: Broadcast[String] =
      streamContext.sparkContext.broadcast[String](Collections.nCopies(40, "#").toArray.mkString("", "", "\n\t"))

    // 6. file the data-generating timer appends to.
    val file: java.io.File = new File(monitorFileDir);

    // 7. auto-flushing appender so the monitor sees each line promptly.
    val writer: PrintWriter = new PrintWriter(new FileWriter(file, true), true)

    // FIX: timer declared outside try so finally can cancel it — the
    // original never cancelled the timer, leaking its worker thread.
    val timer: Timer = new Timer("data generic timer", false);

    import sparkSession.implicits._;

    try {
      // logical clock for generated records, advanced one second per tick.
      var dateTime: LocalDateTime = LocalDateTime.now()
      val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")

      // task: write three "(time,random)" tuples, tab separated, per tick.
      val timerTask: TimerTask = new TimerTask {
        override def run(): Unit = file.synchronized {
          val timeFormat: String = df.format(dateTime)
          dateTime = dateTime.plusSeconds(1)
          val tuple_str = Stream.iterate[(String, Double)](null)(_ => (timeFormat, Math.random())).drop(1).take(3)
            .map(tuple2 => "(%s,%s)".format(tuple2._1, tuple2._2)).toArray.mkString("", "\t", "")
          writer.println(tuple_str)
        }
      }

      // run the generator once per second, starting immediately.
      timer.schedule(timerTask, new Date, 1 * 1000)

      // checkpoint interval: smallest multiple of slideLen >= 10 seconds
      // (same value the original computed twice with Stream.iterate).
      val checkpointMillis: Long =
        Stream.iterate[Int](slideLen)(_ + slideLen).dropWhile(_ < 10).head * 1000L

      // FIX: monitor the directory containing `monitorFileDir` — the
      // original ignored the parameter and used a hard-coded absolute path.
      val monitorDir: String = Option(file.getParent).getOrElse(".")

      streamContext.fileStream[LongWritable, Text, TextInputFormat](monitorDir)
          .map[String](offsetAndText => offsetAndText._2.toString)
          .flatMap(lines => lines.split("\t"))
          // "(time,value)" -> Array(time, value), stripping the parentheses.
          .map(dataUnit => dataUnit.substring(1, dataUnit.length - 1).split(","))
          .map(array2Ele => (array2Ele(0), java.lang.Double.valueOf(array2Ele(1))))
          .transform[(String, java.lang.Double)]((splitsRDD: RDD[(String, java.lang.Double)]) => {
            // show the raw splits of the current batch.
            splitsRDD.toDF("dul_split_time", "dul_split")
              .show()
            splitsRDD
          })
          .checkpoint(Duration.apply(checkpointMillis))
          // incremental window sum: add entering slices, subtract leaving ones.
          .reduceByKeyAndWindow(
            (left: java.lang.Double, right: java.lang.Double) => left + right,
            (left: java.lang.Double, right: java.lang.Double) => left - right,
            Duration.apply(windowLen * 1000),
            Duration.apply(slideLen * 1000),
            1
          )
          .checkpoint(Duration.apply(checkpointMillis))
          .foreachRDD((aggregateRDD: RDD[(String, java.lang.Double)]) => {
            // show the aggregated window, sorted by timestamp.
            aggregateRDD.toDF("time", "sum(double)")
              .sort($"time".asc)
              .show()
            println(report_terminated.value)
          })

      // monitoring starts here; block until terminated/interrupted.
      streamContext.start();
      streamContext.awaitTermination();
    } finally {
      // release resources.
      timer.cancel();
      writer.close();
      streamContext.stop(true);
      report_terminated.destroy();
      sparkSession.stop();
    }

  }

  /**
   * Pushes RDDs of (timestamp, random double) into a queue once per second
   * and aggregates them with a sliding-window sum keyed by timestamp.
   *
   * @param monitorPeriod DStream batch interval in seconds.
   * @param windowLen 窗口长度, 窗口长度内需要包含1个/多个完整的读取/输入周期，
   *                  因此窗口长度需为监控周期的整数倍(Second).
   * @param slideLen 窗口滑动的间隔, 窗口滑动后需要包含下一批次的完整读取/输入,
   *                 因此窗口滑动间隔需要为监控周期的整数倍(Second).
   */
  def acceptStreamDataFromQueueAndDealerInWindow(monitorPeriod: Int,
                                                  windowLen: Int,
                                                  slideLen: Int): Unit = {
    // 1. streaming context with the requested batch interval.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("StreamingDemo");
    val streamContext: StreamingContext = new StreamingContext(sparkConf, Duration.apply(monitorPeriod * 1000))
    // 2. SparkContext.
    val sparkContext: SparkContext = streamContext.sparkContext;
    // 3. create SparkSession on the same conf.
    val sparkSession: SparkSession = SparkSession.builder().config(sparkContext.getConf).getOrCreate();
    // 4. log level "WARN" to keep console output readable.
    sparkContext.setLogLevel("WARN")

    // 5. broadcast the separator printed after every slide's report.
    val report_terminated: Broadcast[String] =
      streamContext.sparkContext.broadcast[String](Collections.nCopies(40, "#").toArray.mkString("", "", "\n\t"))

    // FIX: timer declared outside try so finally can cancel it — the
    // original never cancelled the timer, leaking its worker thread.
    val timer: Timer = new Timer("data generic timer", false);

    import sparkSession.implicits._;

    try {
      // logical clock for generated records, advanced one second per tick.
      var dateTime: LocalDateTime = LocalDateTime.now()
      val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")

      // queue feeding queueStream; each element becomes one batch's RDD.
      val queue: Queue[RDD[(String, Double)]] = new Queue[RDD[(String, Double)]]

      // task: build an RDD of three (timestamp, random) pairs and enqueue it.
      val timerTask: TimerTask = new TimerTask {
        override def run(): Unit = queue.synchronized {
          val timeFormat: String = df.format(dateTime)
          queue += sparkContext.makeRDD[(String, Double)](Stream.iterate[(String, Double)](null)(_ => (timeFormat, Math.random())).drop(1).take(3).toSeq)
          dateTime = dateTime.plusSeconds(1)
        }
      }

      // run the generator once per second, starting immediately.
      timer.schedule(timerTask, new Date, 1 * 1000)

      // By default, spark streaming drops flowed-in data once no action needs
      // it. When several async streams are joined later, a partner stream's
      // data splits may already be gone by join time, making the join
      // meaningless. streamContext.remember(time) keeps data that flowed into
      // the stream alive in the stream cache for the given duration so such
      // cascade operations between async streams stay possible.
      // Here: remember flow data for five minutes.
      streamContext.remember(Duration.apply(60 * 1000 * 5))

      // open the monitor: DStream[(String,Double)] backed by the queue,
      // consuming one queued RDD per batch interval.
      streamContext.queueStream(queue, oneAtATime = true)
          // function to mix data within the current window, like "group":
          //.reduceByKeyAndWindow()
          // function to collect values as a sequence by key and process them:
          //.updateStateByKey()
          .transform[(String, Double)]((splitsRDD: RDD[(String, Double)]) => {
            // register the double splits as a dataFrame and show them.
            splitsRDD.toDF("dul_split_time", "dul_split")
              .show()
            splitsRDD
          })
          // windowed sum per timestamp key.
          .reduceByKeyAndWindow(
            (left: Double, right: Double) => left + right,
            Duration.apply(windowLen * 1000),
            Duration.apply(slideLen * 1000)
          )
          .foreachRDD((aggregateRDD: RDD[(String, Double)]) => {
              // register sum(split) as a dataFrame, sorted by time, and show.
              aggregateRDD.toDF("time", "sum(double)")
                .sort($"time".asc)
                .show()
              println(report_terminated.value)
          })

      // monitoring starts here; block until terminated/interrupted.
      streamContext.start();
      streamContext.awaitTermination();
    } finally {
      // release resources.
      timer.cancel();
      streamContext.stop(true);
      report_terminated.destroy();
      sparkSession.stop();
    }
  }

  /**
   * Reads changed bytes from the file system in variable-length segments,
   * exposing for every segment its end offset and the source file path.
   * A variant of {@link FixedLengthInputFormat}: each read returns at most
   * `maxSegmentLen` bytes, so file writes need not be exact multiples of a
   * fixed segment size.
   *
   * Advantages (compared to the fixed-length byte reader):
   *   1. removes the "must write in exact multiples of the segment size"
   *      constraint on file appends;
   *   2. drops the record counter and exposes the read-end offset directly —
   *      callers usually care about *where* in the file something happened,
   *      and this saves the extra recordCount * fixedSegmentLen mapping;
   *   3. exposes the changed file's path, so when several directories/files
   *      are monitored one can tell exactly which file an error occurred in.
   *
   * @param maxSegmentLen maximum bytes fetched per segment (shorter at EOF).
   * @param charSet       charset used to decode segment bytes for display.
   * @param monitorPeriod batch interval in seconds.
   */
  def acceptStreamDataFromFileSystemByUnFixedSegment(maxSegmentLen: Int = 10,
                                     charSet: String = "ISO-8859-1",
                                     monitorPeriod: Int = 10): Unit = {
    // create the StreamingContext.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("StreamingDemo");
    val streamContext: StreamingContext = new StreamingContext(sparkConf, Duration.apply(monitorPeriod * 1000))
    // create the SparkSession (project helper-managed).
    val sparkSession: SparkSession = SparkSql.getSession("FileStreamMonitor")("local[*]");
    // SparkContext.
    val sparkContext: SparkContext = streamContext.sparkContext;
    // FIX: the comment said WARN but the code set INFO; use WARN like the
    // sibling demos to keep console output readable.
    sparkContext.setLogLevel("WARN")

    import sparkSession.implicits._;

    // create and register the raw byte-array collector.
    val byteAryAccumulator: SeqAccumulator[Array[Byte], Array[Byte]] =
      new SeqAccumulator[Array[Byte], Array[Byte]](origin => origin)
    sparkContext.register(byteAryAccumulator, "byteAryAccumulator")

    // create and register the string collector.
    // FIX: decode with `charSet` instead of the hard-coded "utf-8", so the
    // collected strings match the per-segment decoding done below.
    val stringAccumulator: SeqAccumulator[Array[Byte], String] =
      new SeqAccumulator[Array[Byte], String](new String(_, charSet))
    sparkContext.register(stringAccumulator, "stringAccumulator")

    try {
      val monitorDir: String = StreamTestRunner.MONITOR_DIR_BINARY;
      // create the unfixed-length byte-reading monitor; every segment's
      // bytes / decoded string are fed into the collectors.
      streamContext.fileStream[PathAndOffsetWritable, LengthUnfixedBytesWritable, FileOffsetBytesFormat](
        directory = monitorDir,
        filter = (path: Path) => !path.getName().startsWith("."),
        newFilesOnly = false,
        conf = new Configuration() {
          // maximum segment length per read (shorter when EOF is reached).
          set(FileOffsetBytesFormat.MAX_SEGMENT_LENGTH_PROPERTY, maxSegmentLen.toString)
        }
      )
        // map each record to (end offset, file path, segment bytes).
        .map((offSetAndByteWriter: (PathAndOffsetWritable, LengthUnfixedBytesWritable)) =>
          (offSetAndByteWriter._1.get(), offSetAndByteWriter._1.getFilePath(), offSetAndByteWriter._2.getBytes))
        // .window() could change the monitor's fetch window here.
        .foreachRDD((offsetAndBatchBytes: RDD[(Long, Path, Array[Byte])]) => {
          // show offset, path, file name, changed byte length and the decoded
          // contents, collecting each segment into both accumulators.
          offsetAndBatchBytes
            .map(tuple3 => {
              val transStr: String = new String(tuple3._3, charSet)
              byteAryAccumulator.add(tuple3._3)
              stringAccumulator.add(tuple3._3)
              (tuple3._1, tuple3._2.toString, tuple3._2.getName, tuple3._3.length, transStr)
            })
            .toDF("offset", "fsPath", "fName", "byteLen", "contents")
            .show()

          // concatenate the collected strings; expected to equal the complete
          // source input written so far.
          val completeFormSegmentString = stringAccumulator.value.toArray.mkString("【", "", "】")
          println(s"### String收集器当前完整内容：\n\t${completeFormSegmentString}")

          // same check from the raw-bytes collector, decoding on the fly.
          val completeFormSegmentByteAry = byteAryAccumulator.value.stream()
            .map[String](bytes => {
              new String(bytes, charSet)
            })
            .collect(Collectors.toList[String])
            .toArray
            .mkString("【", "", "】")
          println(s"### Array[Byte]收集器当前完整内容：\n\t${completeFormSegmentByteAry}")
        })

      streamContext.start();
      streamContext.awaitTermination();
    } finally {
      streamContext.stop(true)
      sparkSession.stop()
    }
  }

  /**
   * Reads changed bytes from the file system in fixed-size segments of
   * `fixedSegmentLen` bytes: every change must be a multiple of that length,
   * and hitting EOF in the middle of a record throws.
   * NOTICE: when a monitored file changes several times and the accumulated
   * bytes exceed `fixedSegmentLen`, the *whole* changed file is re-split
   * into fixedSegmentLen blocks and streamed again (not very friendly).
   *
   * Analysis: Spark designed this read mode for the special case where each
   * change writes a fixed number of bytes into one file — not for fixed-size
   * shards spread across different files.
   *
   * @param fixedSegmentLen exact bytes per record read (EOF mid-record throws).
   * @param charSet         charset used to decode segment bytes for display.
   * @param monitorPeriod   batch interval in seconds.
   */
  def acceptStreamDataFromFileSystemByFixedSegment(fixedSegmentLen: Int = 10,
                                     charSet: String = "ISO-8859-1",
                                     monitorPeriod: Int = 10): Unit = {
    // create the StreamingContext.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("StreamingDemo");
    val streamContext: StreamingContext = new StreamingContext(sparkConf, Duration.apply(monitorPeriod * 1000))
    // create the SparkSession (project helper-managed).
    val sparkSession: SparkSession = SparkSql.getSession("FileStreamMonitor")("local[*]");
    // SparkContext.
    val sparkContext: SparkContext = streamContext.sparkContext;
    // log level "WARN" to keep console output readable.
    sparkContext.setLogLevel("WARN")

    import sparkSession.implicits._;

    // create and register the raw byte-array collector.
    val byteAryAccumulator: SeqAccumulator[Array[Byte], Array[Byte]] =
      new SeqAccumulator[Array[Byte], Array[Byte]](origin => origin)
    sparkContext.register(byteAryAccumulator, "byteAryAccumulator")

    // create and register the string collector.
    // FIX: decode with `charSet` instead of the hard-coded "utf-8", so the
    // collected strings match the per-segment decoding done below.
    val stringAccumulator: SeqAccumulator[Array[Byte], String] =
      new SeqAccumulator[Array[Byte], String](new String(_, charSet))
    sparkContext.register(stringAccumulator, "stringAccumulator")

    try {
      val monitorDir: String = StreamTestRunner.MONITOR_DIR_BINARY;
      // create the fixed-length byte-reading monitor; every record's bytes /
      // decoded string are fed into the collectors.
      streamContext.fileStream[LongWritable, BytesWritable, FixedLengthInputFormat](
          directory = monitorDir,
          filter = (path: Path) => !path.getName().startsWith("."),
          newFilesOnly = false,
          conf = new Configuration() {
            // fixed byte length per record (EOF mid-record throws).
            set(FixedLengthInputFormat.FIXED_RECORD_LENGTH, fixedSegmentLen.toString)
          }
        )
        // map each record to (record index, segment bytes).
        .map((offSetAndByteWriter: (LongWritable, BytesWritable)) => (offSetAndByteWriter._1.get(), offSetAndByteWriter._2.getBytes))
        // .window() could change the monitor's fetch window here.
        .foreachRDD((offsetAndBatchBytes: RDD[(Long, Array[Byte])]) => {
          // show record index, changed byte length and the decoded contents,
          // collecting each segment into both accumulators.
          offsetAndBatchBytes
            .map(tuple2 => {
              val transStr: String = new String(tuple2._2, charSet)
              byteAryAccumulator.add(tuple2._2)
              stringAccumulator.add(tuple2._2)
              (tuple2._1, tuple2._2.length, transStr)
            })
            .toDF("recordIdx", "byteLen", "contents")
            .show()

          // concatenate the collected strings; expected to equal the complete
          // source input written so far.
          val completeFormSegmentString = stringAccumulator.value.toArray.mkString("【", "", "】")
          println(s"### String收集器当前完整内容：\n\t${completeFormSegmentString}")

          // same check from the raw-bytes collector, decoding on the fly.
          val completeFormSegmentByteAry = byteAryAccumulator.value.stream()
            .map[String](bytes => {
              new String(bytes, charSet)
            })
            .collect(Collectors.toList[String])
            .toArray
            .mkString("【", "", "】")
          println(s"### Array[Byte]收集器当前完整内容：\n\t${completeFormSegmentByteAry}")
        })

      streamContext.start();
      streamContext.awaitTermination();

    } finally {
      streamContext.stop(true)
      sparkSession.stop()
    }
  }

  /**
   * Word-count over text data picked up from the monitored file directory:
   * each batch, newly appended lines are split on `delimiter`, counted per
   * word and displayed as a DataFrame.
   *
   * @param delimiter     token separator used to split each line.
   * @param monitorPeriod batch interval in seconds.
   */
  def acceptTextDataFromFileSystem(delimiter: String = " ",
                                   monitorPeriod: Int = 10): Unit = {
    // streaming context with the requested batch interval.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("StreamingDemo")
    val streamContext: StreamingContext = new StreamingContext(conf, Duration.apply(monitorPeriod * 1000))
    // helper-managed SparkSession for DataFrame output.
    val sparkSession: SparkSession = SparkSql.getSession("FileStreamMonitor")("local[*]")

    import sparkSession.implicits._

    try {
      val watchedDir = StreamTestRunner.MONITOR_DIR_TEXT
      // tokenize each new line, pair every token with 1, sum per token.
      val wordCounts = streamContext.textFileStream(watchedDir)
        .flatMap(line => line.split(delimiter))
        .map(token => (token, 1))
        .reduceByKey((a, b) => a + b)
      wordCounts.foreachRDD((wordCountRDD: RDD[(String, Int)]) => {
        // render the per-batch counts.
        wordCountRDD.toDF("WORD", "COUNT").show()
      })
      streamContext.start()
      streamContext.awaitTermination()
    } finally {
      streamContext.stop(true)
      sparkSession.stop()
    }
  }

  /**
   * Word-count over text lines received from TCP (localhost:9999).
   * Feed it with the server from StreamTestRunner.scala, or with a
   * `nc -lk <port>` data service.
   */
  def acceptBatchDataFromTCP(): Unit = {
    // create the StreamingContext with a 3-second batch interval.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("StreamingDemo");
    val streamContext: StreamingContext = new StreamingContext(sparkConf, Duration.apply(3 * 1000))

    try {
      // accept text data from TCP and count word occurrences per window.
      streamContext.socketTextStream("localhost", 9999, storageLevel = StorageLevel.MEMORY_ONLY)
          // within each monitoring window the data arrives in batches of
          // lines, and the Spark engine also processes them batch by batch.
          .flatMap(lines => lines.split("\\ "))
          // aggregate the counts.
          .map(word => (word, 1))
          .reduceByKey(_ + _)
          .print();

      // the above only declares the DStream pipeline; the real processing is
      // a long-running capture that must be started and awaited.
      streamContext.start()
      streamContext.awaitTermination();
    } finally {
      // FIX: the original never stopped the context; release it like the
      // sibling demos do.
      streamContext.stop(true)
    }
  }

}
