package org.niit.service


import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.dstream.DStream
import org.niit.bean.AdClickData

import java.io.{FileWriter, PrintWriter}
import java.text.SimpleDateFormat
import java.util.Date
import scala.collection.mutable.ListBuffer

/**
 * Date: 2025/6/5
 * Author: Ys
 * Description: Sliding-window ad-click count service; periodically writes
 *              aggregated per-10-second click counts to data/adclick.json.
 */
class TimeCountService {

  /**
   * Aggregates ad-click counts over a sliding window and dumps the result to
   * `data/adclick.json` as a JSON array of `{"xtime": ..., "yval": ...}` objects.
   *
   * Window: the last 60 seconds of data, recomputed every 10 seconds.
   * Each click's timestamp is floored to its 10-second bucket, so a click at
   * 8:33:06 is counted under the 8:33:00 bucket.
   *
   * @param data stream of parsed ad-click records; `ts` is assumed to be an
   *             epoch-millisecond timestamp string — TODO confirm against AdClickData
   */
  def dataAnalysis(data: DStream[AdClickData]): Unit = {

    // Floor each timestamp to its 10-second bucket.
    // NOTE: `/ 10000 * 10000` stays in MILLISECONDS — it only zeroes the
    // sub-10-second remainder; it is not a ms -> s conversion.
    val bucketed: DStream[(Long, Int)] = data.map { record =>
      (record.ts.toLong / 10000 * 10000, 1)
    }

    // Sum the counts per bucket over a 60-second window, sliding every 10 seconds.
    val windowed: DStream[(Long, Int)] = bucketed.reduceByKeyAndWindow(
      (x: Int, y: Int) => x + y,
      Seconds(60), // window length: one minute of data
      Seconds(10)  // slide interval: recompute every 10 seconds
    )

    windowed.foreachRDD { rdd =>
      // Sort buckets by time ascending and bring them to the driver, then
      // render each as a JSON fragment. SimpleDateFormat is not thread-safe,
      // but this runs sequentially on the driver, and creating it once per
      // batch (instead of once per row, as before) avoids redundant allocation.
      val sdf = new SimpleDateFormat("HH:mm:ss")
      val fragments: Array[String] = rdd.sortByKey(ascending = true).collect().map {
        case (time, count) =>
          s""" {"xtime":"${sdf.format(new Date(time))}","yval": "${count}"}  """
      }

      // Overwrite data/adclick.json with the complete array each batch.
      // try/finally guarantees the writer is closed even if writing fails,
      // fixing the resource leak in the original (no close on exception).
      val out = new PrintWriter(new FileWriter("data/adclick.json"))
      try {
        out.println("[" + fragments.mkString(",") + "]")
        out.flush()
      } finally {
        out.close()
      }
    }
  }
}
