package spark.example

import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

object transformJsonFormat {

  import scala.collection.mutable.ArrayBuffer

  /** Number of "key:value" pairs that make up one output record. */
  private val FieldsPerRecord = 5

  /**
   * Entry point. Reads a text file of flattened JSON (path in args(0)),
   * strips the structural characters, splits it into key/value pairs,
   * groups every 5 values into one row, and registers the result as a
   * temp view for SQL queries.
   */
  def main(args: Array[String]): Unit = {
    require(args.nonEmpty, "usage: transformJsonFormat <input-path>")

    val spark = SparkSession
      .builder()
      .appName("transformJsonFormat")
      .getOrCreate()

    try {
      import spark.implicits._
      // Reuse the SparkContext owned by the SparkSession. The original code
      // built a second SparkContext with `new SparkContext(new SparkConf())`,
      // which fails at runtime: only one active context is allowed per JVM.
      val sc = spark.sparkContext

      val df = sc
        .textFile(args(0))
        // Remove the four JSON structural characters: { } [ ]
        .map(_.replaceAll("[{\\[\\]}]", ""))
        // Split each line on "," into pairs, then each pair on ":".
        // Result element type: Array[Array[String]]
        .map(_.split(",").map(_.split(":")))
        // Collapse every run of 5 pairs into one (UserID, Rating,
        // ReviewTime, Review, MealID) tuple, one row per tuple.
        .flatMap(aggregationFiveItem)
        .toDF("UserID", "Rating", "ReviewTime", "Review", "MealID")

      df.show(5, false)
      df.createOrReplaceTempView("mealDataTable")
      spark.sql("SELECT * FROM mealDataTable limit 5").show(5, false)
      spark.sql("SELECT COUNT(*) FROM mealDataTable").show()
    } finally {
      // Always release cluster resources, even if the job fails.
      spark.stop()
    }
  }

  /**
   * Groups parsed key/value pairs into 5-tuples of their values.
   *
   * Each inner array is expected to be a `key:value` split, so element (1)
   * is the value — pairs whose split produced fewer than 2 elements will
   * still throw; input is assumed well-formed (as in the original).
   *
   * @param arr the pairs for one input line, e.g.
   *            Array(Array("UserID", "A2WOH395IHGS0T"), ...)
   * @return one tuple per complete group of 5 pairs; a trailing incomplete
   *         group is dropped.
   */
  def aggregationFiveItem(arr: Array[Array[String]]): ArrayBuffer[Tuple5[String, String, String, String, String]] = {
    // Use a fresh local buffer per call. The original appended to a shared
    // object-level ArrayBuffer, so every invocation returned all rows
    // accumulated by previous calls in the same JVM — duplicating output.
    val rows = new ArrayBuffer[Tuple5[String, String, String, String, String]]()
    // Step by 5; the guard drops a trailing incomplete group so the
    // i+4 index below can never go out of bounds.
    for (i <- 0 until (arr.length, FieldsPerRecord) if i <= arr.length - FieldsPerRecord) {
      rows += ((arr(i)(1), arr(i + 1)(1), arr(i + 2)(1), arr(i + 3)(1), arr(i + 4)(1)))
    }
    rows
  }
}