package sparkml_study.suijisengling_YuCe

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.feature.{StringIndexer, VectorAssembler}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import java.util.Properties
//  TODO: predict whether the animal will be adopted / rescued
object prediction {

  /**
   * Trains a class-weighted RandomForest to predict whether an animal will be
   * adopted/rescued, evaluates it with AUC, and writes the predictions to MySQL.
   *
   * Fixes over the previous version:
   *  - The StringIndexers and class-weight ratio were previously fit/computed on the
   *    FULL dataset (including the test split), leaking test information into the
   *    model. Everything is now fit on the training split only, via the Pipeline.
   *  - `setHandleInvalid("keep")` lets test rows with categories unseen during
   *    training get a dedicated "unknown" index instead of failing at transform time.
   *  - The SparkSession is released in a `finally` block even if the job fails.
   *  - `randomSplit` is seeded so runs are reproducible.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("test")
      .enableHiveSupport()
      .getOrCreate()

    try {
      // Read the source data from Hive.
      val data = spark.table("animal.all_data")

      // Encode the target: "是" (yes) -> 1.0, anything else -> 0.0.
      val labelData = data.withColumn(
        "label",
        when(col("is_adopt") === lit("是"), 1.0).otherwise(0.0)
      )

      // Split BEFORE fitting any transformer so the test set never leaks into
      // indexer vocabularies or class-weight statistics. Seeded for reproducibility.
      val Array(training, test) = labelData.randomSplit(Array(0.8, 0.2), seed = 42L)

      // Class weights computed from the TRAINING split only (label=1 assumed minority).
      val negCount = training.filter(col("label") === 0.0).count()
      val posCount = training.filter(col("label") === 1.0).count()
      // Guard against a split with no positive rows (would divide by zero).
      val ratio = if (posCount == 0L) 1.0 else negCount.toDouble / posCount.toDouble

      // Weight positives up so the forest pays attention to the minority class.
      val trainWeighted = training.withColumn(
        "classWeight",
        when(col("label") === 1.0, ratio).otherwise(1.0)
      )
      val testWeighted = test.withColumn(
        "classWeight",
        when(col("label") === 1.0, ratio).otherwise(1.0)
      )

      // Index the rescue-reason column. "keep" assigns unseen test-time
      // categories their own bucket instead of throwing.
      val reasonIndexer = new StringIndexer()
        .setInputCol("reason")
        .setOutputCol("reason_index")
        .setHandleInvalid("keep")

      // Index the animal-type column.
      val typeIndexer = new StringIndexer()
        .setInputCol("type")
        .setOutputCol("type_index")
        .setHandleInvalid("keep")

      // Assemble the feature columns into a single vector.
      val assembler = new VectorAssembler()
        .setInputCols(Array("type_index", "reason_index"))
        .setOutputCol("features")

      // Random-forest classifier using the per-row class weights.
      val classifier = new RandomForestClassifier()
        .setWeightCol("classWeight")
        .setNumTrees(100)
        .setMaxDepth(10)

      // The full Pipeline (indexers included) is fit on the training split only.
      val model = new Pipeline()
        .setStages(Array(reasonIndexer, typeIndexer, assembler, classifier))
        .fit(trainWeighted)

      // Score the held-out test split.
      val result = model.transform(testWeighted)
      result.show()

      // Evaluate with area-under-ROC; closer to 1.0 is better.
      val evaluator = new BinaryClassificationEvaluator()
        .setLabelCol("label")
        .setRawPredictionCol("rawPrediction")
        .setMetricName("areaUnderROC")
      val auc = evaluator.evaluate(result)
      println(s"模型AUC = $auc") // AUC越接近1说明模型越好

      // JDBC connection properties.
      // NOTE(review): credentials are hard-coded; move them to config/env vars.
      val conn = new Properties()
      conn.setProperty("user", "root")
      conn.setProperty("password", "123456")
      conn.setProperty("driver", "com.mysql.jdbc.Driver")

      // Drop vector-typed columns that JDBC cannot persist, then write.
      // NOTE(review): target table is spelled "predication" — kept for
      // compatibility with existing downstream consumers.
      result
        .drop("features", "rawPrediction", "probability")
        .write.mode("overwrite")
        .jdbc("jdbc:mysql://192.168.40.110:3306/animal?useSSL=false", "predication", conn)
    } finally {
      // Always release the SparkSession, even if the job fails.
      spark.close()
    }
  }

}
