package cn.hnu.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
 * Demo of basic RDD creation APIs: `parallelize`, `textFile`,
 * `wholeTextFiles`, and reading from HDFS. Prints each RDD's contents
 * and partition count to stdout.
 */
object RddDemo01 {
  def main(args: Array[String]): Unit = {
    // Create the SparkContext (local mode, 2 threads).
    val conf: SparkConf = new SparkConf().setAppName("Rdd-demo").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")
    try {
      // RDD from an in-memory collection.
      val rdd1: RDD[Int] = sc.parallelize(1 to 10)
      val partitions: Int = rdd1.getNumPartitions
      println(rdd1)
      println(rdd1.collect().toBuffer) // ArrayBuffer(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
      println(partitions) // 2 — defaults to the number of local threads
      println("-" * 20)

      // textFile: one RDD element per line of the file.
      val personsRdd: RDD[String] = sc.textFile("data/person.csv")
      val personsArr: mutable.Buffer[String] = personsRdd.collect().toBuffer
      println(personsArr)
      println(personsRdd.getNumPartitions)

      // wholeTextFiles: one (fileName, fileContents) pair per file in the
      // directory; 3 is the minimum number of partitions requested.
      val ratingsRdd: RDD[(String, String)] = sc.wholeTextFiles("data/ratings10", 3)
      println(ratingsRdd.collect().toBuffer)
      println(ratingsRdd.getNumPartitions)

      // RDD from an HDFS path (requires the cluster at node1 to be reachable).
      val dataRdd: RDD[String] = sc.textFile("hdfs://node1/data")
      println(dataRdd.collect().toBuffer)
    } finally {
      // Always release the SparkContext, even if a read above fails;
      // the original code leaked it.
      sc.stop()
    }
  }

}
