package SJY

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.stat.{ChiSquareTest, Correlation}
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.apache.spark.streaming.dstream.DStream
import bean.resume
import common.TService
import util.SparkUtil
import org.apache.spark.sql.functions._

class ProjectScaleAnalysisService extends TService[resume] {

  /**
   * Analyzes the relationship between project scale and screening results.
   *
   * For each micro-batch: derives a binary label and binned scale columns,
   * runs chi-square tests and a correlation analysis, then persists grouped
   * counts to MySQL.
   *
   * @param data stream of resume records to analyze
   */
  def analyzeProjectScaleRelation(data: DStream[resume]): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._

    data.foreachRDD { rdd =>
      // Skip empty micro-batches to avoid pointless ML/JDBC work.
      if (!rdd.isEmpty()) {
        // 1. Convert to DataFrame and derive analysis columns:
        //    is_passed  — 1 when screening_result equals "通过", else 0
        //    *_bin      — project counts bucketed into 0/1/2 (none/few/many)
        val resumeDF = rdd.toDF()
          .withColumn("is_passed", when($"screening_result" === "通过", 1).otherwise(0))
          .withColumn("small_scale_bin", scaleToBin($"small_scale_project"))
          .withColumn("middle_scale_bin", scaleToBin($"middle_scale_project"))
          .withColumn("large_scale_bin", scaleToBin($"large_scale_project"))

        // 2. Chi-square test per scale category
        analyzeScaleRelations(resumeDF)

        // 3. Correlation coefficients between raw counts and the label
        analyzeScaleCorrelation(resumeDF)

        // 4. Persist grouped counts to MySQL
        saveScaleAnalysisResults(resumeDF)
      }
    }
  }

  /**
   * Bins a project count into three levels: 0 = none, 1 = few (1..5), 2 = many (>5).
   *
   * Uses a boxed Integer so a NULL column value does not NPE inside the UDF
   * (the original primitive-Int version crashed on nulls); NULL and negative
   * counts are both treated as "none".
   */
  private def scaleToBin = udf((count: Integer) => {
    val c = if (count == null) 0 else count.intValue()
    if (c <= 0) 0
    else if (c <= 5) 1
    else 2
  })

  /**
   * Runs a chi-square independence test between each binned scale column and
   * the screening outcome, printing p-value, degrees of freedom and statistic.
   */
  private def analyzeScaleRelations(df: DataFrame): Unit = {
    val scaleTypes = Seq(
      ("small_scale_bin", "小规模项目"),
      ("middle_scale_bin", "中规模项目"),
      ("large_scale_bin", "大规模项目")
    )

    scaleTypes.foreach { case (binCol, name) =>
      // FIX: the feature vector must contain ONLY the predictor column.
      // The original also packed the label "is_passed" into the features,
      // which tests the label against itself and always reports a
      // spuriously significant dependence.
      val assembler = new VectorAssembler()
        .setInputCols(Array(binCol))
        .setOutputCol("features")

      val featureDF = assembler.transform(df)
      val chi = ChiSquareTest.test(featureDF, "features", "is_passed").head

      // FIX: ChiSquareTest returns a Row of
      //   (pValues: Vector, degreesOfFreedom: Seq[Int], statistics: Vector).
      // The original getAs[Double](0) / getSeq[Double](2) threw
      // ClassCastException because fields 0 and 2 are ML Vectors.
      val pValues = chi.getAs[org.apache.spark.ml.linalg.Vector](0)
      val degreesOfFreedom = chi.getSeq[Int](1)
      val statistics = chi.getAs[org.apache.spark.ml.linalg.Vector](2)

      println(s"===== $name 与筛选结果相关性分析 =====")
      println(s"p值: ${pValues.toArray.mkString(",")}")
      println(s"自由度: ${degreesOfFreedom.mkString(",")}")
      println(s"统计量: ${statistics.toArray.mkString(",")}")
    }
  }

  /**
   * Computes the Pearson correlation matrix between the raw project counts
   * and the screening outcome, and prints it.
   */
  private def analyzeScaleCorrelation(df: DataFrame): Unit = {
    val assembler = new VectorAssembler()
      .setInputCols(Array("small_scale_project", "middle_scale_project", "large_scale_project", "is_passed"))
      .setOutputCol("features")

    // Correlation.corr yields a single Row whose field 0 is the matrix.
    val correlation = Correlation.corr(assembler.transform(df), "features").head
    println("===== 项目数量与筛选结果的相关系数矩阵 =====")
    println(correlation.getAs[org.apache.spark.ml.linalg.Matrix](0).toString)
  }

  /**
   * Aggregates counts per (screening_result, scale counts) combination,
   * stamps them with the current time, and appends them to MySQL.
   */
  private def saveScaleAnalysisResults(df: DataFrame): Unit = {
    val resultDF = df.groupBy("screening_result",
      "small_scale_project",
      "middle_scale_project",
      "large_scale_project")
      .count()
      .withColumn("analysis_time", current_timestamp())

    // NOTE(review): credentials are hardcoded — move url/user/password to
    // external configuration. "com.mysql.jdbc.Driver" is the legacy driver
    // class; prefer "com.mysql.cj.jdbc.Driver" once Connector/J 8+ is on
    // the classpath — confirm the deployed jar version before changing.
    resultDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/resume_analysis?useUnicode=true&characterEncoding=utf8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "project_scale_relation")
      .mode(SaveMode.Append)
      .save()
  }

  /**
   * Required by [[TService]]; unused — this service consumes whole DStreams
   * via [[analyzeProjectScaleRelation]] rather than single records.
   */
  override def dataAnalysis(data: resume): Unit = {
    // Intentionally a no-op.
  }
}