package org.cancer.service

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.cancer.util.SparkUtil

class TumorSizeTransitionService_Faye {

  /**
   * Buckets tumors by size, computes the metastasis probability per size
   * category, prints the result table, and appends it to a MySQL table.
   *
   * Expected input columns (TODO confirm against callers): "肿瘤大小"
   * (tumor size, castable to double) and "转移" (metastasis flag, "是"/other).
   *
   * @param data source DataFrame holding the raw patient records
   */
  def TumorSizeTransition(data: DataFrame): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._

    // Cast the tumor-size column to Double; unparsable values become null.
    val dataWithDoubleSize = data.withColumn("肿瘤大小", col("肿瘤大小").cast("double"))

    // Convert to an RDD of (size, metastasis) pairs for classification.
    // Rows whose size failed the cast (null) are dropped first: getAs[Double]
    // would silently unbox null to 0.0 and mis-classify them as "微小型瘤".
    val tumorRDD: RDD[(Double, String)] = dataWithDoubleSize
      .filter(col("肿瘤大小").isNotNull)
      .select($"肿瘤大小", $"转移")
      .rdd
      .map(row => (row.getAs[Double]("肿瘤大小"), row.getAs[String]("转移")))

    // Bucket each tumor by size. Cases are evaluated in order, so the
    // explicit lower-bound checks of the original were redundant.
    val categorizedRDD = tumorRDD.map { case (size, metastasis) =>
      val category = size match {
        case s if s <= 0.5 => "微小型瘤" // micro  (<= 0.5)
        case s if s <= 1   => "小型瘤"   // small  (0.5, 1]
        case s if s <= 5   => "中型瘤"   // medium (1, 5]
        case _             => "大型瘤"   // large  (> 5)
      }
      (category, metastasis)
    }

    // Back to a DataFrame so the aggregation can use the DataFrame API.
    val categorizedDF = categorizedRDD.toDF("肿瘤大小分类", "转移状态")

    // Per-category case totals, metastasis counts, and metastasis
    // probability as a percentage rounded to 2 decimals, highest first.
    val metastasisProbability = categorizedDF
      .groupBy("肿瘤大小分类")
      .agg(
        count("*").alias("总病例数"),
        sum(when($"转移状态" === "是", 1).otherwise(0)).alias("转移病例数")
      )
      .withColumn("转移概率（%）", round($"转移病例数" / $"总病例数" * 100, 2))
      .sort(desc("转移概率（%）"))

    metastasisProbability.show()

    // Append the result to MySQL; the JDBC writer creates the table on the
    // first write if it does not exist.
    // NOTE(review): credentials are hard-coded — move to configuration.
    metastasisProbability.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/cancer_patients")
      .option("driver", "com.mysql.cj.jdbc.Driver") // MySQL 8.x driver class
      .option("user", "root")
      .option("password", "123456")
      .option("dbtable", "tumor_size_transition_faye")
      .mode(SaveMode.Append)
      .save()

    // NOTE(review): stopping the (presumably shared) session here makes any
    // later SparkUtil.takeSpark() caller fail — confirm this is intended.
    spark.stop()
  }
}
