package cn.doitedu.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, Partitioner, SparkConf, SparkContext}

import java.text
import java.text.SimpleDateFormat
import java.util.Calendar
import scala.collection.mutable

object T04_ContinuedLoginV2 {

  def main(args: Array[String]): Unit = {

    // 1. Build the Spark context (4 local threads).
    val conf = new SparkConf().setAppName("WordCount")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    val rawLines = sc.textFile("data/login.txt")

    // Parse each line "uid,date" into (userId, loginDate) and drop duplicate logins.
    val userDateRecords: RDD[(String, String)] = rawLines
      .map { line =>
        val fields = line.split(",")
        (fields(0), fields(1))
      }
      .distinct()

    // Action: collect the distinct user ids to the driver;
    // they determine the number of partitions of the custom partitioner.
    val allUserIds: Array[String] = userDateRecords.keys.distinct().collect()

    // Fields that take part in shuffle-time sorting must live in the key,
    // so promote the whole (uid, dt) pair to the key with a null placeholder value.
    val keyedForSort: RDD[((String, String), Null)] = userDateRecords.map(rec => (rec, null))

    // Custom partitioner: one partition per user id.
    val byUserPartitioner = new UidPartitioner(allUserIds)

    val sortedWithin = keyedForSort.repartitionAndSortWithinPartitions(byUserPartitioner)

    //sortedWithin.saveAsTextFile("out/out33")
    // Each partition now holds a single user's dates in ascending order;
    // attach a 1-based row number within the partition.
    sortedWithin.mapPartitions { it =>
      //val sdf = new SimpleDateFormat("yyyy-MM-dd")
      it.zipWithIndex.map { case (((uid, dt), _), i) =>
        (uid, dt, i + 1)
      }
    }.saveAsTextFile("out/out34")

  }
}

/**
 * Partitioner that routes every record of one user id to its own partition,
 * so that `repartitionAndSortWithinPartitions` produces one sorted partition per user.
 *
 * Keys are expected to be (uid, dt) tuples; routing uses the uid component only.
 *
 * @param uidArray all distinct user ids; the partition count equals its length
 */
class UidPartitioner(val uidArray: Array[String]) extends Partitioner {

  // Precomputed lookup: user id -> partition index (position in uidArray).
  private val idToIndex: Map[String, Int] = uidArray.zipWithIndex.toMap

  override def numPartitions: Int = uidArray.length

  /**
   * Returns the partition for the given (uid, dt) key.
   *
   * @throws NoSuchElementException for a uid not present when the partitioner was built
   */
  override def getPartition(key: Any): Int = {
    val uid = key.asInstanceOf[(String, String)]._1
    idToIndex.getOrElse(
      uid,
      throw new NoSuchElementException(s"Unknown uid for partitioning: $uid")
    )
  }

  // Spark compares partitioners to decide whether a shuffle can be skipped;
  // structural equality lets two equivalent instances be recognized as equal
  // instead of falling back to reference equality.
  override def equals(other: Any): Boolean = other match {
    case that: UidPartitioner => that.uidArray.sameElements(uidArray)
    case _                    => false
  }

  // Must be consistent with equals: hash the element sequence, not the array reference.
  override def hashCode(): Int = uidArray.toSeq.hashCode()
}
