package com.spark

import org.apache.spark.sql.SparkSession

import scala.collection.mutable

object SparkTest {
  /**
   * Per-file word count over a directory of text files.
   *
   * Reads every file under `path` with `wholeTextFiles`, counts word occurrences
   * per file, and prints one JSON-ish line per word:
   *   "word" : {(fileA,2),(fileB,1)}
   *
   * @param args optional first argument overrides the default input directory
   *             (backward-compatible: with no args the original hard-coded path is used)
   */
  def main(args: Array[String]): Unit = {
    // Input directory; overridable from the command line instead of hard-coded only.
    val path = args.headOption.getOrElse("/Users/zhaoguanglai/hadoop学习资料/0815work/resource")
    val spark = SparkSession.builder().appName("local").master("local[*]").getOrCreate()
    val sc = spark.sparkContext

    // (fullPath, fileContent) pairs, re-keyed by bare file name.
    // `split("/").last` replaces the original extra collect() job that only
    // existed to learn the path depth of the first file — and it stays correct
    // even if files live at different directory depths.
    val fileNameToContent = sc.wholeTextFiles(path)
      .map { case (fullPath, content) => (fullPath.split("/").last, content) }
      .sortByKey()

    // Emit ((fileName, word), 1) for every word. The original built these pairs
    // with a hand-rolled mutable.LinkedList (deprecated since 2.11, removed in
    // 2.13) plus manual `next` pointer splicing; a flatMap is equivalent and safe.
    // `filter(_.nonEmpty)` drops the empty tokens that blank lines or repeated
    // spaces would otherwise count as a "" word.
    val words = fileNameToContent.flatMap { case (fileName, content) =>
      content.split("\n").iterator.flatMap { line =>
        line.split(" ").iterator.filter(_.nonEmpty).map(word => ((fileName, word), 1))
      }
    }

    // Sum counts per (file, word), then regroup by word so each output line
    // lists every file's count for that word.
    val result = words
      .reduceByKey(_ + _)
      .sortByKey()
      .map { case ((fileName, word), count) => (word, (fileName, count)) }
      .groupByKey()
      .map { case (word, perFileCounts) =>
        "\"" + word + "\" : {" + perFileCounts.mkString(",") + "}"
      }

    println(result.collect().mkString("\n"))

    // Release the local SparkContext; the original leaked the session.
    spark.stop()
  }
}
