package cn.doitedu.day04

import cn.doitedu.day03.MyHashPartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import java.text.SimpleDateFormat

object T03_FlowCountV2 {

  /**
   * Sessionizes user flow records read from data/flow.txt (CSV: uid,startTime,endTime,flow).
   *
   * Records of the same user are grouped into sessions: whenever the gap between a
   * record's start time and the previous record's end time exceeds 10 minutes, a new
   * session begins. Output rows are ((uid, sessionId), (startTime, endTime, flow)),
   * written to out/out19.
   */
  def main(args: Array[String]): Unit = {

    // 1. Build the SparkConf. setMaster must be removed/commented out when
    //    submitting to a cluster.
    val conf = new SparkConf().setAppName("WordCount")
      .setMaster("local[4]")

    // 2. Create the SparkContext
    val sc = new SparkContext(conf)

    try {
      val lines = sc.textFile("data/flow.txt")

      // Parse each CSV line into a ((uid, startTime), (endTime, flow)) pair so the
      // composite key can drive partitioning and in-partition ordering below.
      val tpRdd: RDD[((String, String), (String, Double))] = lines.map(line => {
        val fields = line.split(",")
        val uid = fields(0)
        val startTime = fields(1)
        val endTime = fields(2)
        val flow = fields(3).toDouble
        ((uid, startTime), (endTime, flow))
      })

      // Repartition so all records of one user land in the same partition
      // (MyHashPartitioner — presumably hashes on uid; verify against day03 source)
      // and sort within each partition by (uid, startTime) so each user's records
      // are visited in chronological order.
      val partitioned = tpRdd.repartitionAndSortWithinPartitions(new MyHashPartitioner(tpRdd.partitions.length))

      partitioned.mapPartitions(it => {
        // Per-partition mutable state; safe because each partition's iterator is
        // consumed sequentially by a single task.
        var prevUid: String = null      // uid of the previously seen record
        var prevEndTime: String = null  // end time of the previously seen record
        var sessionId = 0               // running session counter for the current uid
        val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
        val sessionGapMillis = 600000L  // 10 minutes: gap that starts a new session

        it.map {
          case ((uid, startTime), (endTime, flow)) =>
            if (uid.equals(prevUid)) {
              // Same user: a new session starts when this record's start time is
              // more than the gap threshold after the previous record's end time.
              if (sdf.parse(startTime).getTime - sdf.parse(prevEndTime).getTime > sessionGapMillis) {
                sessionId += 1
              }
            } else {
              // Different user: restart session numbering from 0.
              sessionId = 0
            }
            prevEndTime = endTime
            prevUid = uid
            ((uid, sessionId), (startTime, endTime, flow))
        }
      }).saveAsTextFile("out/out19")
    } finally {
      // Release cluster/driver resources even if the job throws.
      sc.stop()
    }
  }

}

