package cn.seecoder.ai.algorithm.ml

import cn.seecoder.ai.enums.TrainParamEnum
import cn.seecoder.ai.model.bo.TrainParamsBO
import cn.seecoder.ai.utils.TrainParamReader.readParam
import org.apache.spark.ml.{Pipeline, PipelineModel, PipelineStage}
import org.apache.spark.ml.evaluation.{ClusteringEvaluator, MulticlassClassificationEvaluator}
import org.apache.spark.ml.feature.VectorIndexer
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

import java.util

/**
 * Template class for clustering algorithms. The concrete clustering stage is
 * supplied by subclasses via [[BaseClustering#buildMachineLearningStage]]
 * (template-method pattern).
 *
 * @author fanyanpeng
 * @date 2023/4/17 5:17
 */
class BaseClustering extends BaseAlgorithm {

  val log: Logger = LoggerFactory.getLogger(getClass)

  /**
   * Trains a clustering pipeline (feature indexing followed by the
   * subclass-provided clustering stage) on a libsvm-formatted data set and
   * evaluates the fitted model on that same data.
   *
   * @param sparkSession  active Spark session used to read the training data
   * @param libSvmFileUri URI of the libsvm-formatted training file
   * @param trainParams   training parameters (e.g. maxCategories)
   * @return the fitted PipelineModel and a map of evaluation metrics
   */
  override def train(sparkSession: SparkSession,
            libSvmFileUri: String,
            trainParams: TrainParamsBO): (PipelineModel, util.Map[String, Object]) = {

    val trainSet = sparkSession.read.format("libsvm").load(libSvmFileUri)

    // Maximum distinct values for a feature to be treated as categorical.
    val maxCategories = trainParams.maxCategories

    // Index categorical features; "keep" routes invalid values into an extra
    // bucket instead of failing the job.
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .setMaxCategories(maxCategories)
      .setHandleInvalid("keep")

    // Debug output: prints the first rows of the training set to stdout.
    trainSet.show()

    // Clustering stage supplied by the concrete subclass.
    val machineLearningStage = buildMachineLearningStage(trainParams)

    val pipeline = new Pipeline()
      .setStages(Array(featureIndexer, machineLearningStage))

    // Train model. This also runs the indexer.
    val pipelineModel: PipelineModel = pipeline.fit(trainSet)

    // For clustering, the training set doubles as the evaluation set.
    val resultMap: util.Map[String, Object] = evaluate(pipelineModel, trainSet, machineLearningStage)

    (pipelineModel, resultMap)
  }

  /**
   * Builds the concrete clustering PipelineStage. Subclasses MUST override
   * this method.
   *
   * @param trainParams training parameters used to configure the stage
   * @return the clustering stage appended to the pipeline
   * @throws UnsupportedOperationException if not overridden by a subclass
   */
  def buildMachineLearningStage(trainParams: TrainParamsBO): PipelineStage = {
    // Fail fast with a clear message instead of returning null, which would
    // otherwise surface later as an opaque NullPointerException inside
    // Pipeline.fit.
    throw new UnsupportedOperationException(
      "buildMachineLearningStage must be implemented by a subclass")
  }

  /**
   * Evaluates the clustering result. For clustering algorithms the training
   * set is also the test set: the metric scores how well the model separates
   * its own cluster assignments.
   *
   * @param pipelineModel        fitted pipeline (feature indexer + clustering model)
   * @param subTestSet           data set to score (here: the training set)
   * @param machineLearningStage the unfitted clustering stage (kept for interface
   *                             compatibility; not required for evaluation)
   * @return java.util.Map from metric name to value (currently only "silhouette")
   */
  def evaluate(pipelineModel: PipelineModel, subTestSet: DataFrame, machineLearningStage: PipelineStage): util.Map[String, Object] = {

    val resultMap: util.Map[String, Object] = new util.HashMap[String, Object]()
    // Make predictions.
    val predictions = pipelineModel.transform(subTestSet)

    // Silhouette measures cluster cohesion vs. separation; evaluate on the
    // indexed features that were actually fed to the clustering stage.
    val evaluator = new ClusteringEvaluator()
      .setFeaturesCol("indexedFeatures")
      .setPredictionCol("prediction")
    val silhouette = evaluator.setMetricName("silhouette").evaluate(predictions)

    // BUG FIX: stages(0) is the fitted VectorIndexerModel, not the learned
    // clustering model — the clustering model is the LAST pipeline stage.
    // The former asInstanceOf[machineLearningStage.type] singleton-type cast
    // was also incorrect (the fitted model is a different instance than the
    // unfitted stage) and is unnecessary for logging.
    val model = pipelineModel.stages.last
    log.info("\n\nLearned model:\n" + model.toString)

    resultMap.put("silhouette", silhouette.toString)

    resultMap
  }
}
