package cn.jly.bigdata.spark.core

import com.alibaba.fastjson.JSONObject
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.parsing.json.JSON

/**
 * @author lanyangji
 * @date 2019/11/28 15:05
 */
/**
 * Example: reading a line-delimited JSON file as an RDD[String] and parsing
 * each line with the (deprecated) `scala.util.parsing.json.JSON` parser.
 */
object Spark10_JSON {

  def main(args: Array[String]): Unit = {

    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("test-file"))

    // Ensure the SparkContext is stopped even if parsing/printing throws.
    try {
      // Precondition: each line of the file must be a complete JSON record, e.g.
      // {"name":"Michael"}
      // {"name":"Andy", "age":30}
      // {"name":"Justin", "age":19}
      // Otherwise per-line parsing fails (JSON.parseFull returns None for a bad line).
      val jsonRdd: RDD[String] = sc.textFile("input/people.json")

      // Expected output:
      // Some(Map(name -> Michael)),Some(Map(name -> Andy, age -> 30.0)),Some(Map(name -> Justin, age -> 19.0))
      // Note: numeric values are parsed as Double by this parser.
      println(jsonRdd.map(JSON.parseFull).collect.mkString(","))

      // scala.util.parsing.json is deprecated since Scala 2.11;
      // Alibaba's fastjson is one possible replacement:
      // val jsonObjRdd: RDD[Any] = jsonRdd.map(com.alibaba.fastjson.JSON.parse).map(_.asInstanceOf[JSONObject])
    } finally {
      // stop() has a side effect, so call it with parentheses per Scala convention.
      sc.stop()
    }
  }
}

/**
 * Mutable person record used by the Spark examples in this package.
 *
 * @param name the person's name
 * @param age  the person's age in years
 */
case class Person(var name: String, var age: Int) {

  /** Renders this person as `"name,age"` (comma-separated, no spaces). */
  override def toString: String = s"$name,$age"
}
