package com.atbeijing.bigdata.spark.core.rdd.instance

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates how `sc.textFile` with `minPartitions = 2` splits a small
 * on-disk file into partitions using Hadoop's FileInputFormat rules,
 * then writes the partitioned result to the "output" directory.
 */
object Spark02_Instance_Disk_Par_Data_1 {

    def main(args: Array[String]): Unit = {

        // Local Spark context using all available cores.
        val sparkConf = new SparkConf().setMaster("local[*]").setAppName("Par")
        val context = new SparkContext(sparkConf)

        /*
         * Split computation for data/word.txt (39 bytes, minPartitions = 2):
         *
         *   totalSize = 39
         *   goalSize  = 39 / 2 = 19 bytes per split
         *   39 / 19   = 2 remainder 1
         *   remainder 1 is < 10% of goalSize (the 1.1 split-slop factor),
         *   so no extra split is created => 2 splits: [0, 19] and [19, 39]
         *
         * Byte offsets per line (@@ marks the 2-byte line terminator):
         *   "Hello World Spark@@" => bytes  0..18
         *   "Scala Hive@@"        => bytes 19..30
         *   "hadoop@@"            => bytes 31..38
         *
         * A whole line goes to the split containing its starting offset
         * (lines are never cut in half), so:
         *   partition 0 => "Hello World Spark", "Scala Hive"
         *   partition 1 => "hadoop"
         */
        val lines: RDD[String] = context.textFile("data/word.txt", 2)

        // NOTE(review): Hadoop refuses to overwrite an existing output
        // directory, so this throws if "output" already exists.
        lines.saveAsTextFile("output")

        context.stop()
    }
}
