package cn.doitedu.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, Partitioner, SparkConf, SparkContext}

import java.text
import java.text.SimpleDateFormat
import java.util.Calendar
import scala.collection.mutable

object T05_ContinuedLoginV3 {

  /**
   * Continued-login preprocessing, step 1.
   *
   * Reads login records from data/login.txt (one "uid,date" pair per line),
   * de-duplicates repeated logins of the same user on the same day, then
   * shuffles so that all records of a uid land in the same partition and
   * each partition is sorted by (uid, date). The partitioned, sorted result
   * is written to out/out01.
   */
  def main(args: Array[String]): Unit = {

    // Fix: app name previously said "WordCount" — a copy-paste leftover
    // from another example; name it after this job.
    val conf = new SparkConf().setAppName("ContinuedLoginV3")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    // Fix: stop the SparkContext even when the job fails, instead of
    // leaking it (the original never called sc.stop()).
    try {
      val lines = sc.textFile("data/login.txt")

      // Parse "uid,date" and drop duplicate logins of the same user on the
      // same day, so one day counts at most once.
      val uidAndDt: RDD[(String, String)] = lines.map(line => {
        val fields = line.split(",")
        val uid = fields(0)
        val dt = fields(1)
        (uid, dt)
      }).distinct()

      // Shuffle-time sorting happens on the key, so every field that must
      // participate in the sort (uid AND dt) has to be part of the key;
      // the value carries no information.
      val uidDtAndNull: RDD[((String, String), Null)] = uidAndDt.map((_, null))

      // Partition on uid only (see MyHashPartitioner) so all records of a
      // user end up in the same partition, while the shuffle still sorts
      // each partition by the full (uid, dt) key.
      val partitioner = new MyHashPartitioner(uidDtAndNull.partitions.length)

      val partitioned = uidDtAndNull.repartitionAndSortWithinPartitions(partitioner)

      partitioned.saveAsTextFile("out/out01")
    } finally {
      sc.stop()
    }
  }
}

/**
 * Hash partitioner that routes a `(uid, dt)` key by the uid alone, so every
 * record of one user lands in the same partition, while the full tuple key
 * still drives sort-within-partition during the shuffle.
 *
 * @param partitions total number of partitions to distribute keys across
 */
class MyHashPartitioner(partitions: Int) extends Partitioner {

  override def numPartitions: Int = partitions

  override def getPartition(key: Any): Int = {
    // Only the uid component decides the partition; dt is ignored here.
    val (uid, _) = key.asInstanceOf[(String, String)]
    val raw = uid.hashCode % numPartitions
    // hashCode may be negative; shift negative remainders into [0, numPartitions).
    if (raw < 0) raw + numPartitions else raw
  }
}