package com.atbeijing.bigdata.spark.mytest.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount3 {

  /**
   * Demo of how Spark's Hadoop-based `textFile` splits a file into partitions.
   *
   * For a 61-byte file with minPartitions = 3:
   *   - goal size per split: 61 / 3 = 20 bytes
   *   - number of full splits: 61 / 20 = 3, remainder 1 byte
   *   - the 1-byte remainder is only promoted to its own split when it exceeds
   *     10% of the goal size; 1 / 20 = 5% < 10%, so NO extra partition is added
   *   - resulting split offsets (splitting is by byte offset, but records are
   *     read whole lines, so a line spanning a boundary goes to the earlier split):
   *       [0, 20], [20, 40], [40, 61]
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("word2")
    val context = new SparkContext(sparkConf)

    try {
      // minPartitions = 3; actual partition count follows the split rules above.
      val w: RDD[String] = context.textFile("data/word.txt", 3)
      // textFile also supports glob patterns, e.g.:
      // val w: RDD[String] = context.textFile("data/he*/word*.txt", 2)

      // NOTE: fails if the "output" directory already exists (Hadoop semantics).
      w.saveAsTextFile("output")

      // wholeTextFiles returns (filePath, fileContent) pairs, one per file:
      // val value: RDD[(String, String)] = context.wholeTextFiles("data/word.txt", 2)
      // value.collect().foreach(println)
    } finally {
      // Always release the driver's resources, even if the job fails.
      context.stop()
    }
  }
}
