package cn.doitedu.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import java.text.SimpleDateFormat
import java.util.Calendar

object T01_ContinuedLoginV1 {

  /**
   * Computes continuous-login streaks per user from "data/login.txt"
   * (lines of the form "uid,yyyy-MM-dd") and writes every user streak of
   * at least 3 consecutive days to "out/out22" as (uid, days, start, end).
   *
   * Algorithm (gaps-and-islands): for each user's distinct, sorted login
   * dates, subtract the 1-based row number (in days) from each date.
   * Consecutive dates collapse to the same "difference" date, which then
   * keys one streak ("island") during the reduce.
   */
  def main(args: Array[String]): Unit = {

    // 1. Build the Spark context.
    //    Fix: app name was the copy-pasted "WordCount"; renamed to match the job.
    val conf = new SparkConf().setAppName("ContinuedLoginV1")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    try {
      val lines = sc.textFile("data/login.txt")

      // Parse "uid,date" lines. distinct() removes duplicate logins on the
      // same day, which would otherwise break the row-number subtraction.
      val uidAndDt: RDD[(String, String)] = lines.map(line => {
        val fields = line.split(",")
        (fields(0), fields(1))
      }).distinct()

      // Group all dates per user so each user's dates can be sorted locally.
      val grouped: RDD[(String, Iterable[String])] = uidAndDt.groupByKey()

      // For each login date emit (date, date - rowNumber). Runs of
      // consecutive days share the same second component ("island key").
      val rdd1: RDD[(String, (String, String))] = grouped.flatMapValues(it => {
        // yyyy-MM-dd strings sort correctly lexicographically.
        val sorted: List[String] = it.toList.sorted
        var index = 0
        // Created inside the closure, so the non-thread-safe
        // SimpleDateFormat/Calendar instances are never shared.
        val sdf = new SimpleDateFormat("yyyy-MM-dd")
        val calendar = Calendar.getInstance()
        sorted.map(dt => {
          index += 1 // 1-based row number within this user's sorted dates
          calendar.setTime(sdf.parse(dt))
          calendar.add(Calendar.DATE, -index)
          (dt, sdf.format(calendar.getTime))
        })
      })

      // Re-key by (uid, islandKey) so each streak reduces to one record:
      // (startDate, endDate, dayCount), seeded with (dt, dt, 1).
      val rdd2: RDD[((String, String), (String, String, Int))] = rdd1.map {
        case (uid, (dt, dif)) => ((uid, dif), (dt, dt, 1))
      }

      // Merge streak fragments: min start date, max end date, summed days.
      val rdd3: RDD[((String, String), (String, String, Int))] = rdd2.reduceByKey((v1, v2) => {
        val start = Ordering[String].min(v1._1, v2._1)
        val end = Ordering[String].max(v1._2, v2._2)
        (start, end, v1._3 + v2._3)
      })

      // Keep only streaks of 3+ consecutive days and persist as text.
      // NOTE(review): saveAsTextFile throws if "out/out22" already exists.
      rdd3.map {
        case ((uid, _), (start, end, sum)) => (uid, sum, start, end)
      }.filter(_._2 >= 3)
        .saveAsTextFile("out/out22")
    } finally {
      // Fix: the original never stopped the SparkContext, leaking the
      // local cluster's resources on every run.
      sc.stop()
    }
  }
}
