package com.atguigu0.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.parsing.json.JSON

/**
 * Demonstrates reading and writing RDDs in several formats:
 * HDFS text files, JSON lines, Hadoop SequenceFiles, and Java object files.
 *
 * @author baojinlong
 * @since 2020/6/13
 */
object ReadDataDemo {

  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("myWordCount").setMaster("local[*]")
    // Create the SparkContext — the entry point for all RDD operations.
    val sc: SparkContext = new SparkContext(sparkConf)

    // Read a text file from HDFS.
    // FIX: the original URI "hdfs:hadoop102:9200/fruit.txt" was malformed — an HDFS URI
    // requires the "hdfs://" scheme separator, and 9200 is the Elasticsearch HTTP port;
    // the NameNode RPC port is typically 9000 (or 8020) — confirm against the cluster's
    // fs.defaultFS setting.
    val hdfsFile: RDD[String] = sc.textFile("hdfs://hadoop102:9000/fruit.txt")
    // textFile is lazy and println(rdd) only prints the RDD's toString (id + name),
    // not its contents — an action is needed to materialize and display the data.
    hdfsFile.collect().foreach(println)
    // Write back as text (left commented out so the demo stays re-runnable:
    // saveAsTextFile fails if the output path already exists).
    // hdfsFile.saveAsTextFile("fruitOut")

    println("01: read JSON")
    val jsonLines: RDD[String] = sc.textFile("E:/test-data/input/my-json.txt")
    jsonLines.foreach(println)
    // Parse each line with the (legacy) scala.util.parsing JSON parser;
    // parseRaw yields None for lines that are not valid JSON.
    val parsedJson: RDD[Option[Any]] = jsonLines.map(JSON.parseRaw)
    parsedJson.foreach(println)
    // FIX: println(array) would print the Array's identity toString ([L...@hash);
    // mkString renders the actual elements.
    println(parsedJson.collect().mkString(", "))

    println("02: read a SequenceFile")
    val pairs: RDD[(Int, Int)] = sc.parallelize(Array((1, 2), (3, 4), (5, 6)))
    // NOTE: fails if the output directory already exists — delete it between runs.
    pairs.saveAsSequenceFile("E:/test-data/input/seqFileDemo")
    val seqFile: RDD[(Int, Int)] = sc.sequenceFile[Int, Int]("E:/test-data/input/seqFileDemo")
    seqFile.collect().foreach(println)

    println("03: read an object file")
    val ints: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4))
    // NOTE: fails if the output directory already exists — delete it between runs.
    ints.saveAsObjectFile("E:/test-data/input/objectFile")
    val objFileRdd: RDD[Int] = sc.objectFile[Int]("E:/test-data/input/objectFile")
    objFileRdd.collect().foreach(println)

    sc.stop()
  }
}
