package org.cancer.service

import org.apache.spark.sql.{DataFrame, Dataset, SaveMode}
import org.cancer.common.TService
import org.cancer.util.{JDBCUtil, SparkUtil}
import org.apache.spark.sql.functions._

class ProportionStagesCancerService_Sylvia extends TService[Dataset[String]] {

  /** Computes, for each tumor type, the proportion of cases in each cancer
    * stage, prints the result, and appends it to a MySQL table.
    *
    * @param data raw lines of the patient CSV; currently unused because the
    *             file is re-read below with Spark's CSV reader so that the
    *             header row can be parsed into column names
    */
  override def dataAnalysis(data: Dataset[String]): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._

    // Read the patient CSV, treating the first line as the header.
    // NOTE(review): the `data` parameter is ignored here — the file is
    // re-read from disk; confirm whether `data` should be parsed instead.
    val patientDF = spark.read
      .option("header", "true")
      .csv("input/ChineseCancerPatientInfo.csv")

    // Keep only tumor type ("肿瘤类型") and cancer stage ("癌症分期"),
    // dropping rows with nulls or an unknown ("未知") stage.
    val cleanedDF = patientDF
      .select("肿瘤类型", "癌症分期")
      .filter($"肿瘤类型".isNotNull && $"癌症分期".isNotNull)
      .filter($"癌症分期" =!= "未知")

    // Case count ("病例数") per (tumor type, cancer stage) pair.
    val stageCountDF = cleanedDF
      .groupBy("肿瘤类型", "癌症分期")
      .agg(count("*").alias("病例数"))

    // Total case count ("总病例数") per tumor type, summed over all stages.
    val totalCountDF = stageCountDF
      .groupBy("肿瘤类型")
      .agg(sum("病例数").alias("总病例数"))

    // Stage proportion ("分期占比") = stage count / type total,
    // rounded to 2 decimal places.
    val proportionDF = stageCountDF
      .join(totalCountDF, "肿瘤类型")
      .withColumn("分期占比", round($"病例数" / $"总病例数", 2))

    // Order by tumor type, then by cancer stage, for deterministic output.
    val resultDF = proportionDF.orderBy($"肿瘤类型", $"癌症分期")

    // Show a sample of the result on stdout for quick inspection.
    resultDF.show()

    // Append the result to MySQL.
    // NOTE(review): credentials are hardcoded in source — move them to
    // configuration (e.g. the imported JDBCUtil) before production use.
    resultDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/cancer_patients")
      .option("driver", "com.mysql.cj.jdbc.Driver")
      .option("user", "root")
      .option("password", "123456")
      .option("dbtable", "ProportionStagesCancer_Sylvia")
      .mode(SaveMode.Append)
      .save()
  }
}