package com.oreilly.learningsparkexamples.scala

import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import org.apache.spark._

import scala.util.control.NonFatal

// Must be defined at top level (outside the object below) so that Jackson's
// Scala module can reflectively instantiate it on the executors.
// `final` because case classes should not be extended.
final case class Person(name: String, lovesPandas: Boolean)

/**
  * Loads newline-delimited JSON records of [[Person]], keeps only the
  * panda lovers, and saves them back out as JSON text.
  *
  * Usage: [sparkmaster] [inputfile] [outputfile]
  */
object BasicParseJsonWithJackson {

  def main(args: Array[String]): Unit = {
    if (args.length < 3) {
      println("Usage: [sparkmaster] [inputfile] [outputfile]")
      System.exit(1)
    }
    val master = args(0)
    val inputFile = args(1)
    val outputFile = args(2)

    val sc = new SparkContext(master, "BasicParseJsonWithJackson")
    try {
      val input = sc.textFile(inputFile)

      // Parse per partition so each executor builds its ObjectMapper only
      // once: mappers are expensive to create and are not serializable.
      val result = input.mapPartitions(records => {
        val mapper = new ObjectMapper with ScalaObjectMapper
        // Tolerate extra JSON fields instead of failing the whole record.
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
        mapper.registerModule(DefaultScalaModule)
        records.flatMap(record => {
          try {
            Some(mapper.readValue(record, classOf[Person]))
          } catch {
            // Skip malformed records. NonFatal (rather than Exception) so
            // fatal errors such as OutOfMemoryError still propagate.
            case NonFatal(_) => None
          }
        })
      }, true) // preservesPartitioning = true

      // Serialize the panda lovers back to JSON strings and write them out.
      result.filter(_.lovesPandas).mapPartitions(records => {
        val mapper = new ObjectMapper with ScalaObjectMapper
        mapper.registerModule(DefaultScalaModule)
        records.map(mapper.writeValueAsString(_))
      })
        .saveAsTextFile(outputFile)
    } finally {
      // Release cluster resources even if the job fails.
      sc.stop()
    }
  }
}