package com.shujia.spark.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

object Demo13StudentPartition {

  def main(args: Array[String]): Unit = {

    // Local Spark context for this demo job.
    val sparkConf = new SparkConf()
      .setAppName("studentPartition")
      .setMaster("local")
    val sc = new SparkContext(sparkConf)

    // Read the student file, requesting a minimum of 2 input partitions.
    val lines: RDD[String] = sc.textFile("data/students.txt", 2)
    println("分区数量为：" + lines.getNumPartitions)

    /**
      * Split liberal-arts (文科) and science (理科) students into
      * separate output files by grouping each full line under itself
      * and letting the custom partitioner route it.
      */
    val grouped: RDD[(String, Iterable[String])] =
      lines.groupBy((line: String) => line, new ClassPartition)

    // Remove any previous output directory so saveAsTextFile does not fail.
    val hadoopConf = new Configuration()
    val fs: FileSystem = FileSystem.get(hadoopConf)
    val outPath = new Path("data/1")
    if (fs.exists(outPath)) {
      fs.delete(outPath, true)
    }

    // Keep only the key (the original line) and write one file per partition.
    grouped.map(_._1).saveAsTextFile("data/1")

  }

}

/**
  * Partitioner that routes each student record (a full comma-separated
  * line) to one of two partitions based on its class field: liberal-arts
  * (文科) lines go to partition 1, everything else to partition 0.
  */
class ClassPartition extends Partitioner {

  override def numPartitions: Int = 2

  override def getPartition(key: Any): Int = {

    // The key is the whole CSV line; field index 4 holds the class name.
    val fields: Array[String] = key.toString.split(",")

    // Guard against short/malformed lines (e.g. empty line): indexing
    // fields(4) unconditionally would throw ArrayIndexOutOfBoundsException
    // and fail the whole job. Malformed records fall back to partition 0.
    if (fields.length > 4 && fields(4).startsWith("文科")) 1 else 0

  }
}
