package com.oreilly.learningsparkexamples.scala

import java.io.{StringReader, StringWriter}

import au.com.bytecode.opencsv.{CSVReader, CSVWriter}
import org.apache.spark._
import scala.collection.JavaConversions._

/**
  * Example of loading and saving CSV files with Spark.
  */
object BasicParseCsv {

  /** One parsed CSV record: a person's name and their favourite animal. */
  case class Person(name: String, favouriteAnimal: String)

  /**
    * Entry point. Reads a CSV file, keeps only the people whose favourite
    * animal is "panda", and writes the matching rows back out as CSV.
    *
    * Expected args: [sparkmaster] [inputfile] [outputfile]
    */
  def main(args: Array[String]): Unit = {
    if (args.length < 3) {
      println("Usage: [sparkmaster] [inputfile] [outputfile]")
      System.exit(1)
    }

    val master = args(0)
    val inputFile = args(1)
    val outputFile = args(2)

    val sc = new SparkContext(master, "BasicParseCsv")
    try {
      val input = sc.textFile(inputFile)

      // Parse each text line as a standalone CSV record.
      // NOTE(review): this assumes no field contains an embedded newline —
      // such records would be split across lines by textFile.
      val result = input.map { line =>
        val reader = new CSVReader(new StringReader(line))
        try reader.readNext()
        finally reader.close() // don't leak the reader per record
      }

      // Guard against empty/malformed rows: readNext() can return null on an
      // empty line, and short rows would otherwise throw on fields(1).
      val people = result
        .filter(fields => fields != null && fields.length >= 2)
        .map(fields => Person(fields(0), fields(1)))

      val pandaLovers = people.filter(_.favouriteAnimal == "panda")

      pandaLovers
        .map(person => Array(person.name, person.favouriteAnimal))
        .mapPartitions { rows =>
          // Explicit .asJava instead of the deprecated implicit JavaConversions:
          // CSVWriter.writeAll expects a java.util.List[Array[String]].
          import scala.collection.JavaConverters._
          val stringWriter = new StringWriter()
          val csvWriter = new CSVWriter(stringWriter)
          try csvWriter.writeAll(rows.toList.asJava)
          finally csvWriter.close() // flush buffered output into stringWriter
          Iterator(stringWriter.toString)
        }
        .saveAsTextFile(outputFile)
    } finally {
      sc.stop() // release Spark resources even if the job fails
    }
  }

}