import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}



         /**
          * Counts words in UTF-8 encoded, '\n'-delimited text received from the network every second.
          *
          * Usage: NetworkWordCount <hostname> <port>
          * <hostname> and <port> describe the TCP server that Spark Streaming would connect to receive data.
          *
          * To run this on your local machine, you need to first run a Netcat server
          *    `$ nc -lk 9999`
          * and then run the example
          *    `$ bin/run-example org.apache.spark.examples.streaming.NetworkWordCount localhost 9999`
          */
         object socketTextStream {

           /**
            * Entry point. Connects to a TCP server, counts whitespace-separated
            * words in each 1-second batch, and saves the per-batch counts as
            * text files with the prefix "word-count".
            *
            * @param args args(0) = hostname, args(1) = port of the TCP server
            *             to receive data from.
            */
           def main(args: Array[String]): Unit = {
             // Fail fast with a usage message if host/port were not supplied.
             if (args.length < 2) {
               System.err.println("Usage:NetworkWordCount <hostname> <port>")
               System.exit(1)
             }

             println("**************get start..................")
             println(args(0) + args(1))

             // Create the streaming context with a 1-second batch interval.
             val sparkConf = new SparkConf().setAppName("NetworkWordCount")
             val ssc = new StreamingContext(sparkConf, Seconds(1))

             // Create a socket stream on the target ip:port and count the words
             // in the input stream of '\n'-delimited text (e.g. generated by 'nc').
             // Note: no replication in the storage level is fine only when running
             // locally; replication is necessary in a distributed scenario for
             // fault tolerance.
             val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)

             val words = lines.flatMap(_.split(" "))
             val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)

             // wordCounts.print() could be used instead for console debugging.
             wordCounts.saveAsTextFiles("word-count")
             // wordCounts.saveAsHadoopFiles("wordcount", ".hdfs")  // known issue, left disabled

             ssc.start()
             ssc.awaitTermination()
           }
         }
