package org.cancer.service

import org.apache.spark.sql.{DataFrame, Dataset}
import org.cancer.common.TService
import org.cancer.util.{JDBCUtil, SparkUtil}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.expressions.Window

class IncidenceCancerTopNService_Sylvia extends TService[Dataset[String]] {

  /**
   * Computes, per age group (青年/中年/老年), the ranking of cancer types by
   * patient count, prints the result, and appends it to a MySQL table.
   *
   * @param data input dataset handle; note the CSV is re-read from
   *             `input/ChineseCancerPatientInfo.csv` with header inference,
   *             so `data` itself is not consumed here.
   */
  override def dataAnalysis(data: Dataset[String]): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._

    // Read the patient CSV with the first line as the header.
    // (The previous `val header = data.first()` was dead code — the value was
    // never used but still triggered a Spark action — so it was removed.)
    val patientDF = spark.read
      .option("header", "true")
      .csv("input/ChineseCancerPatientInfo.csv")

    // Preprocessing: cast the age column to int and drop rows with a missing
    // age or tumour type (failed casts become null and are filtered out too).
    val cleanedDF = patientDF
      .withColumn("年龄", $"年龄".cast("int"))
      .filter($"年龄".isNotNull && $"肿瘤类型".isNotNull)

    // Age bucketing with built-in when/otherwise instead of a UDF so Catalyst
    // can optimize the expression. Boundary fix: age 44 now falls in 青年,
    // matching the conventional 0-44 / 45-59 / 60+ split — the original
    // `age < 44` pushed exactly-44 into 中年 (off-by-one).
    val dfWithAgeGroup = cleanedDF.withColumn(
      "年龄段",
      when($"年龄" <= 44, "青年")
        .when($"年龄" <= 59, "中年")
        .otherwise("老年")
    )

    // Patient count per (age group, tumour type).
    val countDF = dfWithAgeGroup
      .groupBy($"年龄段", $"肿瘤类型")
      .agg(count("患者编号").alias("患者数量"))

    // Rank tumour types within each age group by descending patient count.
    val windowSpec = Window.partitionBy($"年龄段").orderBy($"患者数量".desc)
    val rankedDF = countDF.withColumn("排名", rank().over(windowSpec))

    // Order the output 青年 -> 中年 -> 老年, then by rank. The lookup-map UDF
    // is replaced with when/otherwise; an unknown group still sorts first
    // (key 0), preserving the original getOrElse(_, 0) behavior.
    val resultDF = rankedDF
      .withColumn(
        "年龄段排序",
        when($"年龄段" === "青年", 1)
          .when($"年龄段" === "中年", 2)
          .when($"年龄段" === "老年", 3)
          .otherwise(0)
      )
      .orderBy($"年龄段排序", $"排名")
      .drop("年龄段排序")

    // Show the ranking on stdout for inspection.
    resultDF.show()

    // Append the ranking to MySQL.
    // SECURITY NOTE(review): credentials are hard-coded; move them into
    // configuration (e.g. via JDBCUtil or spark conf) before production use.
    resultDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/cancer_patients")
      .option("driver", "com.mysql.cj.jdbc.Driver")
      .option("user", "root")
      .option("password", "123456")
      .option("dbtable", "incidenceCancerTopN_Sylvia")
      .mode(SaveMode.Append)
      .save()
  }
}