    // Per-word counts grouped by line id. Input lines look like: "<id>. [w1 w2 ...]".
    // NOTE(review): the original used wholeTextFiles, which yields (path, content)
    // PAIRS — calling .split on that tuple does not compile. The record parsing
    // below (split on "." into id / bracketed word list) matches one-record-per-line
    // input, so textFile is the intended read.
    val file = spark.sparkContext.textFile(
      "file:///Users/Dandy/Desktop/jike-bigdata/a5-spark-core/geektime-bigdata-spark/spark-a1/data/1.txt")

    println("show the result")
    file
      .flatMap { record =>
        // e.g. "3. [a b c]"  ->  parts = Array("3", " [a b c]")
        val parts = record.split("\\.", -1)
        val id = parts(0)
        val bracketed = parts(1).trim
        // strip the surrounding '[' and ']', then split into individual words
        bracketed.substring(1, bracketed.length - 1).split(" ", -1).map(word => (word, id))
      }
      // Count each (word, id) pair with reduceByKey: one shuffle, no full-group
      // materialization (the original groupByKey + per-group reduceRight built
      // the whole id list per word before counting).
      .map { case (word, id) => ((word, id), 1) }
      .reduceByKey(_ + _)
      .map { case ((word, id), count) => (word, (id, count)) }
      .groupByKey()
      .sortBy(_._1, ascending = true)
      // Collect to the driver before printing: RDD.foreach(println) executes on
      // the EXECUTORS, so in cluster mode nothing appears on the driver console,
      // and the sortBy ordering is not observable per-partition.
      .collect()
      .foreach { case (word, idCounts) =>
        println(s"$word: {${idCounts.mkString(", ")}}")
      }