package org.cancer.service

import org.apache.spark.sql.SaveMode
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.cancer.bean.CancersData
import org.cancer.dao.CancersDao

import java.text.SimpleDateFormat
import java.util.Date

class CancersHighAreaService$Laurel {

  /**
   * Real-time analysis: for each day and cancer type, maintain the cumulative
   * count of new patients per province, compute the Top-N high-incidence
   * provinces, and append each batch's result to MySQL.
   *
   * Pipeline:
   *   1. map each record to key (day, cancerType, province) with value 1
   *   2. updateStateByKey for a running total across batches
   *      (NOTE: requires the caller to have set ssc.checkpoint(...))
   *   3. group by (day, cancerType), sort provinces by count desc, take Top-N
   *   4. write each non-empty batch to MySQL and log it
   *
   * @param data stream of raw CancersData records (ts, province, cancers_type)
   * @param ssc  implicit StreamingContext (checkpointing must be enabled for state)
   */
  def realtimeCancersAnalysis(data: DStream[CancersData])(implicit ssc: StreamingContext): Unit = {
    // Top-N parameter (could be made configurable)
    val TOP_N = 5

    // 1. Key: (day, cancerType, province), Value: 1.
    //    Use mapPartitions so the (non-thread-safe) SimpleDateFormat is created
    //    once per partition instead of once per record.
    val mapDS: DStream[((String, String, String), Int)] = data.mapPartitions { iter =>
      // BUG FIX: pattern was "yyyy-MM-dd-" — the trailing '-' leaked into every
      // day key and every row written to MySQL (e.g. "2024-01-15-").
      val sdf = new SimpleDateFormat("yyyy-MM-dd")
      iter.map { line =>
        val day = sdf.format(new Date(line.ts.toLong))
        ((day, line.cancers_type, line.province), 1)
      }
    }

    // 2. Running total per key across all batches.
    val updateFunc = (seq: Seq[Int], opt: Option[Int]) => Some(seq.sum + opt.getOrElse(0))
    val stateDS: DStream[((String, String, String), Int)] = mapDS.updateStateByKey(updateFunc)

    // 3. Re-key by (day, cancerType), then per group: sort provinces by count
    //    descending, keep Top-N, and attach a 1-based rank.
    val topNProvinceDS: DStream[(String, String, String, Int, Int)] = stateDS
      .map { case ((day, cancersType, province), count) =>
        ((day, cancersType), (province, count))
      }
      .groupByKey()
      .flatMap { case ((day, cancersType), provinceCounts) =>
        provinceCounts.toList
          .sortBy(-_._2) // count descending
          .take(TOP_N)
          .zipWithIndex
          .map { case ((province, count), idx) =>
            (day, cancersType, province, count, idx + 1) // rank is 1-based
          }
      }

    // 4. Persist each non-empty batch to MySQL and print it for inspection.
    topNProvinceDS.foreachRDD { rdd =>
      if (!rdd.isEmpty) {
        // Three actions follow (count, JDBC save, collect); cache so the
        // lineage (incl. the shuffle above) is not recomputed three times.
        rdd.cache()

        import org.apache.spark.sql.SparkSession
        val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
        import spark.implicits._
        val df = rdd.toDF("day", "cancers_type", "province", "patient_count", "rank")
        println(s"本批次数据条数: ${rdd.count()}")
        df.show()

        // Write to MySQL via the JDBC data source.
        // SECURITY NOTE(review): credentials are hard-coded here — move the
        // url/user/password into external configuration.
        df.write
          .format("jdbc")
          .option("url", "jdbc:mysql://node1:3306/cancer_patients")
          .option("driver", "com.mysql.cj.jdbc.Driver") // MySQL 8.x driver class
          .option("user", "root")
          .option("password", "123456")
          .option("dbtable", "cancers_high_area_laurel")
          .mode(SaveMode.Append) // table is auto-created if it does not exist
          .save()

        rdd.collect().foreach { case (day, cancersType, province, count, rank) =>
          println(s"日期: $day,  肿瘤类型: $cancersType,  省份: $province, 患者数: $count, 排名：$rank")
        }

        rdd.unpersist()
      }
    }
  }

}
