package cn.seecoder.ai.service

import cn.seecoder.ai.utils.Constant
import cn.seecoder.ai.enums.MachineLearningAlgorithmTypeEnum
import cn.seecoder.ai.model.vo.{FileInfoVO, ModelVO}
import cn.seecoder.ai.utils.HdfsHelper
import cn.seecoder.ai.utils.MLAlgorithmHelper.getMachineLearningAlgorithmTypeEnum
import org.apache.spark.ml.PipelineModel
import org.apache.spark.ml.feature.StringIndexerModel
import org.apache.spark.rdd.RDD
import org.apache.spark.sql
import org.apache.spark.sql.functions.{col, element_at, log, split, udf}
import org.apache.spark.sql.types.{ArrayType, DataType, DoubleType, LongType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Service

import scala.collection.mutable
import org.apache.spark.ml.linalg.Vector

@Service
class PredictionScalaService {

  /** Name of the temporary row-number column used to align two DataFrames positionally. */
  private final val tmpOrderId: String = "tmp_order_id"

  @Autowired
  private var modelService: ModelService = _

  @Autowired
  private var sparkSession: SparkSession = _

  @Autowired
  private var configService: ConfigService = _

  @Autowired
  private var fileInfoService: FileInfoService = _

  @Autowired
  private var featureEngineeringService: FeatureEngineeringService = _

  @Autowired
  private var hdfsHelper: HdfsHelper = _

  /**
   * Predicts with a pipeline model: runs the model over the file identified by
   * `fileInfoId`, joins the predicted columns back onto the original file,
   * writes the combined table to the remote store as a single CSV file (with
   * header), and builds a small comma-separated sample for front-end preview.
   *
   * @author fanyanpeng
   * @date 2023/4/22 1:27
   * @param fileInfoId    id of the file to run predictions on
   * @param modelId       id of the trained model
   * @param pipelineModel the fitted Spark pipeline model
   * @return (uri the prediction CSV was saved to, sample data as a CSV string)
   */
  def usePipeLineModelToPredict(fileInfoId: Integer,
                                modelId: Integer,
                                pipelineModel: PipelineModel): (String, String) = {

    // Predicted column(s), one row per row of the input file
    val predictedResult = getPredictedResult(fileInfoId, modelId, pipelineModel)

    // Reload the original file so predictions can be shown next to its columns
    val fileInfoVO: FileInfoVO = fileInfoService.getFileInfo(0, fileInfoId)
    val originalTable: DataFrame = sparkSession.read.parquet(fileInfoVO.getFileUri)

    // Positionally combine predictions with the original columns;
    // both sides are derived from the same file and have the same row count
    val predictionTable = joinDataFrameWithSameRowCount(predictedResult, originalTable)

    // Target location for the prediction file
    val predictionUri = hdfsHelper.getUriPrediciton(modelId, fileInfoId)

    // coalesce(1) so exactly one CSV part file (with header) is produced
    predictionTable.coalesce(1).write
      .mode(SaveMode.Overwrite)
      .option("header", "true")
      .option("encoding", "UTF-8")
      .csv(predictionUri)

    (predictionUri, headToString(predictionTable))
  }

  /**
   * Runs the pipeline model over one file and returns a DataFrame containing
   * only the predicted column(s), shaped by algorithm type:
   *  - classification: restored string label plus one probability column per class
   *  - regression:     a single "predicted_值" column
   *  - clustering:     a single "predicted_种类" column
   *
   * @author fanyanpeng
   * @date 2023/4/22 1:41
   * @param fileInfoId    id of the file to predict on
   * @param modelId       id of the model the prediction belongs to
   * @param pipelineModel the fitted model
   * @return prediction DataFrame, one row per input row, in input order
   * @throws IllegalArgumentException for an unsupported algorithm type
   */
  private def getPredictedResult(fileInfoId: Integer,
                                 modelId: Integer,
                                 pipelineModel: PipelineModel): DataFrame = {

    val modelVO = modelService.getModelVO(0, modelId)
    val configId = modelVO.getConfigId

    // Feature engineering converts the raw file into libsvm form, reusing the
    // StringIndexer pipelines fitted at training time (isTrainData = false)
    val libSvmUri = featureEngineeringService.processSingleFile(
      configId,
      fileInfoId,
      hdfsHelper.getUriLibSvm(modelId, fileInfoId),
      hdfsHelper.getUriFeatureStringIndexerPipeline(modelId),
      hdfsHelper.getUriLabelStringIndexer(modelId),
      modelVO.getMachineLearningAlgorithm,
      isTrainData = false)

    val algorithmType = getMachineLearningAlgorithmTypeEnum(modelVO.getMachineLearningAlgorithm)

    // Load the prepared data and apply the model; "prediction" holds the raw result
    val df = sparkSession.read.format("libsvm").load(libSvmUri)
    val prediction = pipelineModel.transform(df)

    // The match is an expression: every branch yields the result DataFrame,
    // so no null-initialised var is needed
    algorithmType match {
      case MachineLearningAlgorithmTypeEnum.CLASSIFICATION =>
        // Numeric predicted label plus the per-class probability vector
        var predictedResult = prediction.select("prediction", "probability")

        // URI of the label StringIndexer, used to map numeric indices back to
        // the original label strings (e.g. 1.0 -> "好瓜", 0.0 -> "坏瓜")
        val labelStringIndexerUri = hdfsHelper.getUriLabelStringIndexer(modelId)

        // libsvm holds only numeric values; the feature engineering service
        // restores the human-readable label into "predicted_label"
        predictedResult = featureEngineeringService.restoreAColumnUseStringIndexer(
          predictedResult,
          labelStringIndexerUri,
          "prediction",
          "predicted_label")

        // Reload the indexer to obtain the full label vocabulary
        val stringIndexerModel: StringIndexerModel = StringIndexerModel.load(labelStringIndexerUri)
        val indexToLabel = stringIndexerModel.labels.zipWithIndex.map(_.swap).toMap

        // Expand the probability vector into one "probability_<label>" column per class
        val vec2Array = udf((v: org.apache.spark.ml.linalg.Vector) => v.toArray)
        for (index <- 0 until indexToLabel.size) {
          predictedResult = predictedResult.withColumn(
            "probability_" + indexToLabel(index),
            vec2Array(col("probability")).getItem(index))
        }
        predictedResult.drop("probability")

      case MachineLearningAlgorithmTypeEnum.REGRESSION =>
        prediction.select("prediction").toDF("predicted_值")

      case MachineLearningAlgorithmTypeEnum.CLUSTERING =>
        prediction.select("prediction").toDF("predicted_种类")

      case other =>
        // Fail fast with a descriptive error instead of the raw MatchError
        // the previous non-exhaustive match would have thrown
        throw new IllegalArgumentException(s"Unsupported machine learning algorithm type: $other")
    }
  }

  /**
   * Joins two DataFrames with identical row counts positionally: each side gets
   * a temporary row-number column, rows are matched on it, and the helper
   * column is dropped from the result.
   *
   * NOTE(review): positional alignment assumes both DataFrames produce rows in
   * the same deterministic order; both callers derive their inputs from the
   * same source file — confirm if new call sites are added.
   *
   * @author fanyanpeng
   * @date 2023/4/22 1:30
   * @return joined DataFrame with the columns of both inputs
   */
  private def joinDataFrameWithSameRowCount(df1: DataFrame, df2: DataFrame): DataFrame = {
    val left = addId(df1)
    val right = addId(df2)
    left.join(right, tmpOrderId)
      .orderBy(tmpOrderId)
      .drop(col(tmpOrderId))
  }

  /**
   * Renders the first Constant.CSV_HEAD_COUNT rows of a DataFrame as a CSV
   * string: a comma-separated header line followed by comma-separated rows.
   *
   * @author fanyanpeng
   * @date 2023/4/22 1:13
   * @param dataFrame table to take the sample from
   * @return the sample formatted as a CSV string
   */
  private def headToString(dataFrame: DataFrame): String = {
    val sb = new StringBuilder
    sb.append(dataFrame.schema.fieldNames.mkString(",")).append("\n")
    dataFrame.head(Constant.CSV_HEAD_COUNT).foreach { row =>
      sb.append(row.mkString(",")).append("\n")
    }
    sb.toString()
  }

  /**
   * Appends a LongType row-number column (named `tmpOrderId`) to a DataFrame.
   * Used to positionally combine the original data with the prediction data.
   *
   * zipWithIndex pairs each Row with its 0-based position; the index is then
   * merged into the Row as the extra column.
   *
   * @author fanyanpeng
   * @date 2023/4/22 1:14
   * @param dataFrame table to number
   * @return the same table with the extra row-number column appended
   */
  private def addId(dataFrame: DataFrame): DataFrame = {
    val schema: StructType = dataFrame.schema.add(StructField(tmpOrderId, LongType))
    val rowRDD: RDD[Row] = dataFrame.rdd.zipWithIndex().map {
      case (row, index) => Row.merge(row, Row(index))
    }
    sparkSession.createDataFrame(rowRDD, schema)
  }

}
