package cn.itcast.tags.tools
import cn.itcast.tags.config.ModelConfig
import cn.itcast.tags.utils.HdfsUtils
import org.apache.hadoop.conf.Configuration
import org.apache.spark.internal.Logging
import org.apache.spark.ml.{Model, Pipeline, PipelineModel}
import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, DecisionTreeClassifier}
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, MulticlassClassificationEvaluator}
import org.apache.spark.ml.feature.{VectorAssembler, VectorIndexer}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.tuning.{CrossValidator, CrossValidatorModel, ParamGridBuilder, TrainValidationSplit, TrainValidationSplitModel}
import org.apache.spark.sql.DataFrame
import org.apache.spark.storage.StorageLevel
/**
 * 算法模型工具类：专门依据数据集训练算法模型，保存及加载
 */
object MLModelTools extends Logging {

  /**
   * Tune the `maxIter` hyper-parameter and return the best KMeans model,
   * i.e. the one with the lowest WSSSE (within-set sum of squared errors)
   * on the given dataset.
   *
   * @param dataframe training data containing a "features" vector column
   * @param kClusters number of clusters (K)
   * @return the KMeansModel with the smallest WSSSE
   */
  def trainBestKMeansModel(dataframe: DataFrame, kClusters: Int): KMeansModel = {
    // 1. Candidate values for the maxIter hyper-parameter.
    val maxIters: Array[Int] = Array(10, 20, 50)
    // 2. Train one model per hyper-parameter value.
    val models: Array[(Double, KMeansModel, Int)] = maxIters.map{
      maxIter =>
        // a. Build a KMeans estimator for this maxIter value.
        val kMeans: KMeans = new KMeans()
          .setFeaturesCol("features")
          .setPredictionCol("prediction")
          .setK(kClusters) // number of clusters
          .setMaxIter(maxIter)
        //.setSeed(31) // in a real project, fix the seed for reproducibility
        // b. Fit the model.
        val model: KMeansModel = kMeans.fit(dataframe)
        // c. Evaluation metric: WSSSE.
        val ssse = model.computeCost(dataframe)
        // d. Return a triple of (metric, model, hyper-parameter value).
        (ssse, model, maxIter)
    }
    //models.foreach(println)
    // 3. Pick the model with the lowest WSSSE.
    val (_, bestModel, _) = models.minBy(tuple => tuple._1)
    // 4. Return the best model.
    bestModel
  }


  /**
   * Load a previously saved model from HDFS; if no saved model exists,
   * train the best model for the given tag type and persist it.
   *
   * @param dataframe dataset used to train a model when none is saved yet
   * @param mlType    algorithm/tag name; supported: "rfm", "rfe", "psm", "usg"
   * @param clazz     class whose simple name (with any trailing '$' of a Scala
   *                  object stripped) is appended to MODEL_BASE_PATH to form
   *                  the model save path
   * @return the loaded or freshly trained model
   * @throws IllegalArgumentException if mlType is not one of the supported names
   */
  def loadModel(dataframe: DataFrame, mlType: String, clazz: Class[_]): Model[_] = {
    val modelPath: String = s"${ModelConfig.MODEL_BASE_PATH}${clazz.getSimpleName.stripSuffix("$")}"
    val conf: Configuration = dataframe.sparkSession.sparkContext.hadoopConfiguration
    val modelExists: Boolean = HdfsUtils.exists(conf, modelPath)
    if (modelExists) {
      logWarning(s"===============正在从${modelPath}加载模型==========")
      mlType.toLowerCase match {
        case "rfm" => KMeansModel.load(modelPath)
        case "rfe" => KMeansModel.load(modelPath)
        case "psm" => KMeansModel.load(modelPath)
        case "usg" => PipelineModel.load(modelPath)
        // FIX: the original match was non-exhaustive and threw a bare
        // MatchError on an unknown mlType; fail fast with a clear message.
        case other =>
          throw new IllegalArgumentException(s"Unsupported mlType: $other")
      }
    } else {
      // No saved model: train the best model for this tag type.
      logWarning(s"===============正在训练最佳模型==========")
      val bestModel = mlType.toLowerCase match {
        case "rfm" => trainBestKMeansModel(dataframe, 5)
        case "rfe" => trainBestKMeansModel(dataframe, 4)
        case "psm" => trainBestKMeansModel(dataframe, 5)
        case "usg" => trainBestPipelineModel(dataframe)
        // FIX: same non-exhaustive match defect as above.
        case other =>
          throw new IllegalArgumentException(s"Unsupported mlType: $other")
      }
      logWarning(s"===============正在保存模型到${modelPath}==========")
      bestModel.save(modelPath)
      bestModel
    }
  }


  /**
   * Use K-Fold cross validation to tune hyper-parameters and return the best
   * PipelineModel (VectorAssembler -> VectorIndexer -> DecisionTreeClassifier).
   *
   * @param dataframe dataset expected to contain "color", "product" and
   *                  "label" columns (e.g. userId, color, product, label)
   * @return the best PipelineModel found by cross validation
   */
  def trainBestPipelineModel(dataframe: DataFrame): PipelineModel = {
    // a. Assemble the raw feature columns into a single vector column.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("color", "product"))
      .setOutputCol("raw_features")
    // b. Index categorical features.
    val vectorIndexer: VectorIndexer = new VectorIndexer()
      .setInputCol("raw_features")
      .setOutputCol("features")
      .setMaxCategories(30) // max distinct values for a feature to count as categorical

    // c. Decision tree classifier.
    val dtc: DecisionTreeClassifier = new DecisionTreeClassifier()
      .setFeaturesCol("features")
      .setLabelCol("label")
      .setPredictionCol("prediction")

    // d. Chain the stages into a Pipeline.
    val pipeline: Pipeline = new Pipeline()
      .setStages(Array(assembler, vectorIndexer, dtc))

    // e. Hyper-parameter grid to search over.
    // FIX (comment only): maxBins is the max number of bins used to
    // discretize continuous features, not the number of tree leaves.
    val paramGrid: Array[ParamMap] = new ParamGridBuilder()
      .addGrid(dtc.maxDepth, Array(5, 10))
      .addGrid(dtc.impurity, Array("gini", "entropy"))
      .addGrid(dtc.maxBins, Array(32, 64))
      .build()

    // f. Multiclass evaluator.
    // Supported metric names: f1, weightedPrecision, weightedRecall, accuracy.
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("label")
      .setPredictionCol("prediction")
      .setMetricName("accuracy")

    // g. Cross validator: estimator (the pipeline), evaluator and param grid.
    val cv: CrossValidator = new CrossValidator()
      .setEstimator(pipeline)       // the algorithm, here the whole pipeline
      .setEvaluator(evaluator)      // model evaluator
      .setEstimatorParamMaps(paramGrid) // hyper-parameter grid
      .setNumFolds(3) // split into 3 folds: one for validation, the rest for training

    // h. Fit all parameter combinations.
    val cvModel: CrossValidatorModel = cv.fit(dataframe)

    // i. Extract and return the best model found.
    val pipelineModel: PipelineModel = cvModel.bestModel.asInstanceOf[PipelineModel]
    pipelineModel
  }
}
