package com.shujia.spark.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

object Demo14StudentPartition {

  /**
    * Splits the student file into separate output files by subject track
    * (liberal arts vs. science) using a custom [[Partitioner]], so that each
    * saved part-file contains only one track's rows.
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("StudentPartition").setMaster("local")

    val sc = new SparkContext(conf)

    // Read the raw student rows (CSV lines: ...,clazz at field index 4).
    val linesRDD: RDD[String] = sc.textFile("data/students.txt")

    // Group by the full line; the custom partitioner routes each key to a
    // partition based on the clazz field, giving one output file per track.
    val groupRDD: RDD[(String, Iterable[String])] =
      linesRDD.groupBy((line: String) => line, new ClazzPartition)

    println("groupRDD的分区数：" + groupRDD.getNumPartitions)

    // Create the Hadoop configuration and FileSystem handle used to manage
    // the output directory.
    val configuration = new Configuration()
    val fileSystem: FileSystem = FileSystem.get(configuration)

    // BUG FIX: the original code only called saveAsTextFile in the `else`
    // branch, so when the output directory already existed it was deleted but
    // no new output was ever written. Correct pattern: delete a stale output
    // directory if present, then ALWAYS save.
    val outputPath = new Path("data/class")
    if (fileSystem.exists(outputPath)) {
      // Recursive delete of the whole output directory.
      fileSystem.delete(outputPath, true)
    }
    groupRDD.map(_._1).saveAsTextFile("data/class")
  }

  /**
    * Routes a student row to one of two partitions by subject track.
    *
    * Assumes the key is a CSV line with the class name at field index 4
    * (e.g. "文科三班" / "理科一班"); rows with fewer fields would throw
    * ArrayIndexOutOfBoundsException — input is expected to be well-formed.
    */
  class ClazzPartition extends Partitioner {
    override def numPartitions: Int = 2

    /** Partition 1 for liberal-arts (文科) classes, partition 0 otherwise. */
    override def getPartition(key: Any): Int = {
      val clazz: String = key.toString.split(",")(4)
      if (clazz.startsWith("文科")) 1 else 0
    }
  }
}
