package SJY

import bean.resume
import common.TService
import util.SparkUtil

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg.{DenseVector, Vector => MLVector}
import org.apache.spark.ml.stat.ChiSquareTest
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.apache.spark.sql.functions._
import org.apache.spark.streaming.dstream.DStream

class ResumeAnalysisService extends TService[resume] {

  /**
   * Analyzes the relation between business experience and the screening
   * outcome for each micro-batch of the resume stream.
   *
   * For every non-empty RDD delivered by the stream, the resumes are
   * converted to a DataFrame, enriched with a binary pass label and ordinal
   * experience indices, then chi-square-tested and persisted to MySQL.
   *
   * @param data streaming source of parsed resume records
   */
  def analyzeExperienceScreeningRelation(data: DStream[resume]): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._

    data.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // 1. Convert to a DataFrame and derive the analysis columns.
        //    is_passed      : 1 when screening_result equals "通过" (passed), else 0.
        //    *_exp_index    : ordinal encoding of the experience bucket (-1 = unknown).
        val resumeDF = rdd.toDF()
          .withColumn("is_passed", when($"screening_result" === "通过", 1).otherwise(0))
          .withColumn("small_exp_index", experienceToIndex($"small_business_experience"))
          .withColumn("middle_exp_index", experienceToIndex($"middle_business_experience"))
          .withColumn("large_exp_index", experienceToIndex($"large_business_experience"))

        // 2. Run one chi-square test per experience type.
        analyzeExperienceRelations(resumeDF)

        // 3. Persist the aggregated counts to MySQL.
        saveAnalysisResults(resumeDF)
      }
    }
  }

  /**
   * UDF mapping an experience-range string to an ordinal index.
   * Unknown or missing values map to -1 and are excluded from the tests.
   */
  private def experienceToIndex = udf((exp: String) => exp match {
    case "1-3年" => 0
    case "3-5年" => 1
    case "5年以上" => 2
    case _ => -1
  })

  /**
   * Runs an independent chi-square test between each experience index column
   * and the screening label, printing p-value, degrees of freedom and the
   * test statistic for each.
   *
   * Fixes relative to the previous revision:
   *  - The feature vector previously contained BOTH the experience index and
   *    the label itself ("is_passed"), so the label was tested against
   *    itself; only the experience index is assembled now.
   *  - The result Row of ChiSquareTest.test has schema
   *    (pValues: Vector, degreesOfFreedom: Seq[Int], statistics: Vector).
   *    Reading column 0 as Double and column 1 as DenseVector threw
   *    ClassCastException at runtime; values are now extracted with the
   *    correct types (the Vector may be dense or sparse, hence MLVector).
   *  - Rows whose experience bucket is unknown (index -1) are filtered out
   *    so the sentinel is not treated as an extra category, and an empty
   *    filtered batch no longer crashes the test.
   */
  private def analyzeExperienceRelations(df: DataFrame): Unit = {
    val expTypes = Seq(
      ("small_exp_index", "小型企业经验"),
      ("middle_exp_index", "中型企业经验"),
      ("large_exp_index", "大型企业经验")
    )

    // Note: renamed the tuple binding from `col` to `indexCol` so it does not
    // shadow org.apache.spark.sql.functions.col.
    expTypes.foreach { case (indexCol, name) =>
      // Drop the -1 "unknown" sentinel so it does not appear as a category.
      val knownDF = df.filter(col(indexCol) >= 0)

      println(s"===== $name 与筛选结果相关性分析 =====")

      if (knownDF.head(1).isEmpty) {
        // ChiSquareTest would fail on an empty dataset; skip gracefully.
        println("无有效样本，跳过检验")
      } else {
        // Assemble ONLY the experience index; the label must not be part of
        // the feature vector.
        val assembler = new VectorAssembler()
          .setInputCols(Array(indexCol))
          .setOutputCol("features")

        val featureDF = assembler.transform(knownDF)
        val chi = ChiSquareTest.test(featureDF, "features", "is_passed").head

        // Row schema: (pValues, degreesOfFreedom, statistics) — one entry per feature.
        val pValues = chi.getAs[MLVector](0).toArray
        println(s"p值: ${pValues.mkString(",")}")

        val degreesOfFreedom = chi.getSeq[Int](1)
        println(s"自由度: ${degreesOfFreedom.mkString(",")}")

        val statistics = chi.getAs[MLVector](2).toArray
        println(s"统计量: ${statistics.mkString(",")}")
      }
    }
  }

  /**
   * Persists per-combination counts (screening result × the three experience
   * buckets) to the MySQL table `experience_screening_relation`, stamped with
   * the analysis time. Rows are appended on every micro-batch.
   */
  private def saveAnalysisResults(df: DataFrame): Unit = {
    val resultDF = df.groupBy("screening_result",
      "small_business_experience",
      "middle_business_experience",
      "large_business_experience")
      .count()
      .withColumn("analysis_time", current_timestamp())

    // NOTE(review): JDBC credentials are hard-coded here; they should be
    // moved to external configuration / a secrets store.
    resultDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/resume_analysis?useUnicode=true&characterEncoding=utf8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "experience_screening_relation")
      .mode(SaveMode.Append)
      .save()
  }

  /**
   * Required by TService. Intentionally a no-op: this service operates on
   * whole micro-batches via [[analyzeExperienceScreeningRelation]], not on
   * individual records.
   */
  override def dataAnalysis(data: resume): Unit = {
    // Intentionally left empty.
  }
}