package cn.doitedu.day04

import org.apache.spark.{SparkConf, SparkContext}

import java.text.SimpleDateFormat

/**
 * Sessionizes per-user flow records and aggregates flow per session.
 *
 * Input  : data/flow.txt, CSV lines of `uid,startTime,endTime,flow`
 *          with times formatted "yyyy-MM-dd HH:mm:ss".
 * Output : out/out16 — one record per (uid, session): earliest start,
 *          latest end, and total flow of that session.
 *
 * A new session begins whenever the gap between a record's start time and
 * the previous record's end time exceeds [[SessionGapMillis]].
 */
object T02_FlowCountV1 {

  // Records more than 10 minutes apart belong to different sessions.
  private val SessionGapMillis: Long = 10 * 60 * 1000L

  def main(args: Array[String]): Unit = {

    // 1. Create SparkConf (appName fixed — was copy-pasted "WordCount").
    val conf = new SparkConf().setAppName("FlowCount")
      .setMaster("local[4]") // comment out setMaster when submitting to a cluster

    // 2. Create SparkContext
    val sc = new SparkContext(conf)

    try {
      val lines = sc.textFile("data/flow.txt")

      lines.map(line => {
        // Parse one CSV line: uid,startTime,endTime,flow
        val fields = line.split(",")
        val uid = fields(0)
        val startTime = fields(1)
        val endTime = fields(2)
        val flow = fields(3).toDouble
        (uid, (startTime, endTime, flow))
      }).groupByKey()
        .flatMapValues(it => {
          // Within one user, sort by start time; the fixed zero-padded
          // "yyyy-MM-dd HH:mm:ss" format makes string order == time order.
          val sorted = it.toList.sortBy(_._1)
          // SimpleDateFormat is not thread-safe, but it is created and used
          // serially inside this single task, which is safe.
          val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
          var prevEnd: String = null
          var sessionId = 0
          sorted.map(t => {
            val startTime = t._1
            val endTime = t._2
            // Open a new session when the gap since the previous record's
            // end exceeds the threshold. (First record: prevEnd is null,
            // so it stays in session 0.)
            if (prevEnd != null &&
                sdf.parse(startTime).getTime - sdf.parse(prevEnd).getTime > SessionGapMillis) {
              sessionId += 1
            }
            prevEnd = endTime
            (startTime, endTime, sessionId, t._3)
          })
        }).map {
        case (uid, (startTime, endTime, sessionId, flow)) =>
          // Re-key by (uid, sessionId) so each session aggregates independently.
          ((uid, sessionId), (startTime, endTime, flow))
      }.reduceByKey((v1, v2) => {
        // Merge two records of the same session:
        // earliest start, latest end, summed flow.
        val startTime = Ordering[String].min(v1._1, v2._1)
        val endTime = Ordering[String].max(v1._2, v2._2)
        val sum = v1._3 + v2._3
        (startTime, endTime, sum)
      }).saveAsTextFile("out/out16")
    } finally {
      // Release Spark resources even if the job throws (was missing entirely).
      sc.stop()
    }
  }

}

