package com.whvc.scala

import org.apache.spark.{SparkConf, SparkContext}

object SparKCoreDemo {

  /**
   * Demo entry point for Spark Core RDD creation.
   *
   * Shows three ways to build an RDD and inspect its partitioning:
   *   1. `parallelize` with an explicit slice count,
   *   2. `makeRDD` using the default parallelism of the local master,
   *   3. `textFile` reading a local CSV as plain lines.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setMaster("local[*]") // run locally, one worker thread per available core
      .setAppName("HelloWorldSpark")

    val sc = new SparkContext(sparkConf)

    // Guarantee the SparkContext is released even if a job below fails
    // (e.g. textFile/collect on a missing path), so the local UI port
    // and scheduler threads are not leaked.
    try {
      val list = List(1, 2, 3, 4, 5)

      // Explicitly request 4 partitions.
      val rdd = sc.parallelize(list, numSlices = 4)
      // getNumPartitions is the idiomatic accessor for the partition count.
      println(s"rdd的分区数: ${rdd.getNumPartitions}")

      // No slice count given: partition count falls back to
      // sc.defaultParallelism (number of local cores for local[*]).
      val rdd1 = sc.makeRDD(list)
      println(s"rdd1的分区数: ${rdd1.getNumPartitions}")

      // Read the CSV as raw text lines, append a visible marker to each
      // line, and pull the results back to the driver for printing.
      val salaryRdd = sc.textFile(path = "data/Employee_salary_first_half.csv")
      val resultArray = salaryRdd.map(line => line + "//////").collect()
      resultArray.foreach(println)
    } finally {
      sc.stop()
    }
  }
}