package SJY

import org.apache.spark.streaming.dstream.DStream
import bean.resume
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

class ResumeAnalysisController {

  // Delegated services for the basic, single-dimension analyses.
  private val experienceService = new ResumeAnalysisService()
  private val scaleService = new ProjectScaleAnalysisService()

  /** Runs the complete analysis pipeline over the incoming resume stream.
    *
    * Registers all analyses (service-delegated and local) on the stream;
    * actual work happens per micro-batch once the StreamingContext starts.
    *
    * @param data streaming batches of resume records to analyse
    */
  def fullAnalysis(data: DStream[resume]): Unit = {
    // 1. Basic analyses — delegated to the dedicated services.
    experienceService.analyzeExperienceScreeningRelation(data)
    scaleService.analyzeProjectScaleRelation(data)

    // 2. Advanced analyses — implemented below.
    analyzeCombinedImpact(data)
    analyzeKeyFactors(data)
  }

  /** Returns the SparkSession bound to the given RDD's SparkContext
    * configuration (getOrCreate reuses the active session if one exists).
    *
    * Extracted so each foreachRDD body does not repeat the builder boilerplate.
    */
  private def sessionFor(rdd: org.apache.spark.rdd.RDD[resume]): SparkSession =
    SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()

  /** Cross-tabulates derived experience level x derived project-scale level
    * x screening result, and prints the counts ordered by all three columns.
    *
    * Level thresholds and labels are the business rules carried over from the
    * original inline expressions (e.g. ">1 large projects" => large tier).
    */
  private def analyzeCombinedImpact(data: DStream[resume]): Unit = {
    data.foreachRDD { rdd =>
      val spark = sessionFor(rdd)
      import spark.implicits._

      rdd.toDF()
        .withColumn("exp_level",
          when($"large_business_experience" === "5年以上", "资深")
            .when($"middle_business_experience" === "5年以上", "成熟")
            .otherwise("初级"))
        .withColumn("project_level",
          when($"large_scale_project" > 1, "大型")
            .when($"middle_scale_project" > 3, "中型")
            .otherwise("小型"))
        .groupBy("exp_level", "project_level", "screening_result")
        .count()
        .orderBy("exp_level", "project_level", "screening_result")
        .show(false)
    }
  }

  /** For each screening result, prints the average number of small-, middle-
    * and large-scale projects — a quick view of which project-count factors
    * correlate with the screening outcome.
    */
  private def analyzeKeyFactors(data: DStream[resume]): Unit = {
    data.foreachRDD { rdd =>
      val spark = sessionFor(rdd)
      import spark.implicits._

      rdd.toDF()
        .groupBy("screening_result")
        .agg(
          avg($"small_scale_project").alias("avg_small"),
          avg($"middle_scale_project").alias("avg_middle"),
          avg($"large_scale_project").alias("avg_large")
        )
        .show(false)
    }
  }
}