package cn.seecoder.ai.service

import cn.seecoder.ai.dao.ModelRepository
import cn.seecoder.ai.enums.{MachineLearningAlgorithmEnum, MachineLearningAlgorithmTypeEnum, TrainParamEnum}
import cn.seecoder.ai.exception.AIExternalException
import cn.seecoder.ai.model.vo.{ConfigVO, FileFieldVO, FileInfoVO}
import cn.seecoder.ai.utils.HdfsHelper
import cn.seecoder.ai.utils.MLAlgorithmHelper.getMachineLearningAlgorithmTypeEnum
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, StringIndexerModel, VectorAssembler}
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.functions.{col, lit, to_date}
import org.apache.spark.sql.types.{DataType, DoubleType, IntegerType, LongType, StringType, TimestampType}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Service
import org.springframework.util.Assert

import java.util
import collection.mutable._
import scala.jdk.CollectionConverters.asScalaBufferConverter
import scala.util.control.NonFatal
/**
 * Feature-engineering service. The number of methods is small, so the logic is
 * kept in this single class rather than split across multiple helpers.
 *
 * @author fanyanpeng
 * @date 2023/4/17 6:04
 */
@Service
class FeatureEngineeringService {

  // Shared SparkSession supplied by the Spring context; used to read parquet
  // inputs and write LibSVM outputs.
  @Autowired
  private var sparkSession: SparkSession = _

  // HDFS utility bean. NOTE(review): not referenced in the visible portion of
  // this class — possibly used elsewhere or a leftover dependency.
  @Autowired
  private var hdfsHelper: HdfsHelper = _

  // Resolves file metadata (URI, field descriptors) by file id.
  @Autowired
  private var fileInfoService:FileInfoService = _

  // Resolves model configuration (feature list, label column) by config id.
  @Autowired
  private var configService:ConfigService = _

  // JPA repository for persisting train parameters back onto the model entity.
  @Autowired
  private var modelRepository: ModelRepository = _

  private val log: Logger = LoggerFactory.getLogger(getClass)

  // Canonical name of the numeric label column produced by the pipeline.
  private final val indexedLabel = "label_index"

  /**
   * Computes the share of training rows (train / (train + test)) and stores it
   * in the model's train parameters under TRAIN_DATA_SET_OCCUPY.
   *
   * Fixes over the original: guards against an empty union (0/0 previously
   * produced NaN and corrupted the stored param) and fails with a clear message
   * instead of a bare NoSuchElementException when the model row is missing.
   *
   * @param trainDataFrame the training partition
   * @param testDataFrame  the test partition
   */
  private def updateTrainDataSetOccupy(trainDataFrame: DataFrame, testDataFrame: DataFrame): Unit = {
    val trainDataSetCount = trainDataFrame.count()
    val testDataSetCount = testDataFrame.count()
    val total = trainDataSetCount + testDataSetCount
    // 0/0 would yield NaN; refuse to persist a meaningless ratio.
    Assert.isTrue(total > 0, "训练集与测试集均为空，无法计算训练集占比")
    val trainDataSetOccupy: Double = trainDataSetCount.toDouble / total

    // TODO(review): model id is hard-coded to 1 (as in the original code);
    // parameterize if more than one model must be supported.
    val modelOptional = modelRepository.findById(1)
    Assert.isTrue(modelOptional.isPresent, "模型不存在")
    val model = modelOptional.get()

    val trainParams = model.getTrainParams
    trainParams.put(TrainParamEnum.TRAIN_DATA_SET_OCCUPY, trainDataSetOccupy.toString)
    model.setTrainParams(trainParams)
    log.info("model trainDataSetOccupy set, value is {}", trainDataSetOccupy)
    // Persist the updated params immediately.
    modelRepository.saveAndFlush(model)
  }

  /**
   * Fetches file metadata for the given id, failing fast when absent.
   * NOTE(review): the first argument (1) looks like a fixed tenant/user id — confirm.
   */
  private def getFileInfoVO(fileInfoId: Integer): FileInfoVO = {
    val vo = fileInfoService.getFileInfo(1, fileInfoId)
    Assert.notNull(vo, "文件信息不存在")
    vo
  }
  /**
   * Fetches model configuration for the given id, failing fast when absent.
   * NOTE(review): the first argument (1) looks like a fixed tenant/user id — confirm.
   */
  private def getConfigVO(configId: Integer): ConfigVO = {
    val vo = configService.getConfig(1, configId)
    Assert.notNull(vo, "配置信息不存在")
    vo
  }


    /**
     *
     * @author   fanyanpeng
     * @date 2023/5/7 1:05
     * @param configId 模型配置id
     * @param fileInfoId 文件id
     * @param libSvmFileUri libSvm文件存储位置
     * @param featureStringIndexerPipelineUri 特征转换模型存储位置
     * @param labelStringIndexerUri 标签转换模型存储位置
     * @param machineLearningAlgorithmEnum 机器学习算法枚举
     * @param isTrainData 是否是训练数据
     * @return java.lang.String
     */
  def processSingleFile(configId: Integer,
                        fileInfoId: Integer,
                        libSvmFileUri: String,
                        featureStringIndexerPipelineUri: String,
                        labelStringIndexerUri: String,
                        machineLearningAlgorithmEnum: MachineLearningAlgorithmEnum,
                        isTrainData: Boolean): String = {
    val fileInfoVO: FileInfoVO = getFileInfoVO(fileInfoId)
    var fileDataFrame: DataFrame = sparkSession.read.parquet(fileInfoVO.getFileUri)

    val configVO: ConfigVO = getConfigVO(configId)
    val features = configVO.getFeatures.asScala.toArray
    val label = configVO.getLabel

    processDF(
      fileDataFrame,
      fileInfoVO.getFileFields,
      features,
      label,
      libSvmFileUri,
      featureStringIndexerPipelineUri,
      labelStringIndexerUri,
      machineLearningAlgorithmEnum,
      isTrainData
    )
  }

  /**
   * Processes a pre-split train/test pair: records the train-set ratio on the
   * model, unions the two frames, and runs the shared feature pipeline over the
   * combined data (as training data, so indexers are fitted on both splits).
   *
   * @param configId                        model configuration id (features + label)
   * @param trainFileId                     id of the training parquet file
   * @param testFileId                      id of the test parquet file
   * @param libSvmFileUri                   destination URI of the LibSVM output
   * @param featureStringIndexerPipelineUri where the feature StringIndexer pipeline is saved
   * @param labelStringIndexerUri           where the label StringIndexer model is saved
   * @param machineLearningAlgorithmEnum    algorithm, drives label handling
   * @return URI of the written LibSVM file
   */
  def processTrainAndTestFile(configId: Integer,
                              trainFileId: Integer,
                              testFileId: Integer,
                              libSvmFileUri: String,
                              featureStringIndexerPipelineUri: String,
                              labelStringIndexerUri: String,
                              machineLearningAlgorithmEnum: MachineLearningAlgorithmEnum): String = {
    val trainFileInfoVO: FileInfoVO = getFileInfoVO(trainFileId)
    // val: never reassigned (was an unnecessary var).
    val trainDataFrame: DataFrame = sparkSession.read.parquet(trainFileInfoVO.getFileUri)

    val testFileInfoVO: FileInfoVO = getFileInfoVO(testFileId)
    val testDataFrame: DataFrame = sparkSession.read.parquet(testFileInfoVO.getFileUri)

    val configVO: ConfigVO = getConfigVO(configId)
    val features = configVO.getFeatures.asScala.toArray
    val label = configVO.getLabel

    // Persist train/(train+test) on the model before merging the frames.
    updateTrainDataSetOccupy(trainDataFrame, testDataFrame)

    // NOTE(review): union is positional — assumes both parquet files share the
    // same column order/schema; confirm upstream guarantees this.
    val fileDataFrame = trainDataFrame.union(testDataFrame)

    processDF(
      fileDataFrame,
      trainFileInfoVO.getFileFields,
      features,
      label,
      libSvmFileUri,
      featureStringIndexerPipelineUri,
      labelStringIndexerUri,
      machineLearningAlgorithmEnum,
      true
    )
  }


  /**
   * Core pipeline: takes a raw DataFrame through column selection, row
   * filtering, type coercion, null filling, feature/label indexing and vector
   * assembly, then exports the result in LibSVM format.
   *
   * @return URI of the written LibSVM file
   */
  private def processDF(fileDataFrame: DataFrame,
                        fileFields: util.List[FileFieldVO],
                        features: Array[String],
                        label: String,
                        libSvmFileUri: String,
                        featureStringIndexerPipelineUri: String,
                        labelStringIndexerUri: String,
                        machineLearningAlgorithmEnum: MachineLearningAlgorithmEnum,
                        isTrainData: Boolean): String = {

    var dataSet: DataFrame = fileDataFrame
    dataSet.show(10)

    val algorithmType = getMachineLearningAlgorithmTypeEnum(machineLearningAlgorithmEnum)

    // Project to the configured features (+ label for supervised training data).
    dataSet = selectColumns(isTrainData, dataSet, features, label, algorithmType)

    // Drop rows whose label is null (supervised training only).
    dataSet = filterDataSet(isTrainData, dataSet, label, algorithmType)

    // Coerce columns of unsupported types to String.
    dataSet = dealWithOtherDataType(dataSet, features)

    // Convert timestamp columns to a numeric (Long) representation.
    dataSet = dealWithTimeStamp(dataSet, features)

    // Fill nulls with each field's recorded mode (or a type default).
    dataSet = fillNullWithMode(dataSet, fileFields, features)

    // Index string-typed features (fit indexer pipeline on training data, load otherwise).
    dataSet = indexFeatures(isTrainData, dataSet, features, featureStringIndexerPipelineUri, algorithmType)

    // Normalize the label into the numeric "label_index" column.
    dataSet = indexLabel(isTrainData, dataSet, features, label, labelStringIndexerUri, machineLearningAlgorithmEnum)

    // Assemble all feature columns into a single vector column.
    dataSet = assembleToVector(isTrainData, dataSet, features, algorithmType)

    // Export as LibSVM and return its location (was an unnecessary var temp).
    saveToLibSvmFile(dataSet, libSvmFileUri)
  }


  /** True when the algorithm type is clustering (unsupervised — no label column). */
  private def isClusteringAlgorithm(algorithmType: MachineLearningAlgorithmTypeEnum): Boolean =
    algorithmType == MachineLearningAlgorithmTypeEnum.CLUSTERING


  /**
   * Projects the data set down to the configured feature columns, prepending the
   * label column for supervised training data. The result is persisted because
   * several later stages traverse it.
   */
  private def selectColumns(isTrainData: Boolean,
                            dataSet: DataFrame,
                            features: Array[String],
                            label: String,
                            algorithmType: MachineLearningAlgorithmTypeEnum): DataFrame = {
    // Non-training data and clustering algorithms need no label column.
    val needsLabel = isTrainData && !isClusteringAlgorithm(algorithmType)
    val columns: Array[String] = if (needsLabel) label +: features else features
    dataSet.select(columns.head, columns.tail: _*).persist()
  }

  /** Drops rows with a null label; applies only to supervised training data. */
  private def filterDataSet(isTrainData: Boolean,
                            dataSet: DataFrame,
                            label: String,
                            algorithmType: MachineLearningAlgorithmTypeEnum): DataFrame =
    if (isTrainData && !isClusteringAlgorithm(algorithmType)) dataSet.filter(col(label).isNotNull)
    else dataSet

  /**
   * Legacy null filling: replaces nulls in every string-typed column with "NA".
   *
   * NOTE(review): not referenced anywhere in this file — fillNullWithMode
   * supersedes it; candidate for removal once confirmed unused elsewhere.
   *
   * @author fanyanpeng
   * @date 2023/4/21 1:11
   */
  private def fillNull(dataSet: DataFrame): DataFrame = {
    val stringColumns = dataSet.schema.collect { case f if f.dataType == StringType => f.name }
    dataSet.na.fill("NA", stringColumns)
  }


  /**
   * Casts every timestamp-typed feature column to Long so it can be assembled
   * into the numeric feature vector.
   */
  private def dealWithTimeStamp(dataSet: DataFrame,
                                features: Array[String]): DataFrame =
    features.foldLeft(dataSet) { (df, feature) =>
      if (df.schema(feature).dataType == TimestampType)
        df.withColumn(feature, col(feature).cast(LongType))
      else
        df
    }


  /**
   * Casts any feature column outside the supported type set
   * (String/Integer/Long/Double/Timestamp) to String, so that downstream
   * string indexing can still handle it.
   *
   * @author fanyanpeng
   * @date 2023/4/21 5:50
   */
  private def dealWithOtherDataType(dataSet: DataFrame, features: Array[String]): DataFrame = {
    val supported: Set[DataType] = Set(StringType, IntegerType, LongType, DoubleType, TimestampType)
    features.foldLeft(dataSet) { (df, feature) =>
      if (supported(dataSet.schema(feature).dataType)) df
      else df.withColumn(feature, col(feature).cast(StringType))
    }
  }

  /**
   * Fills null cells per feature column using the field's recorded mode (most
   * frequent value); when no mode is available, falls back to a type-specific
   * default ("NA", 0, 0L or 0.0). Fields not in the feature list are skipped.
   *
   * @author fanyanpeng
   * @date 2023/4/21 1:11
   */
  private def fillNullWithMode(dataSet: DataFrame,
                               fileFields: util.List[FileFieldVO],
                               features: Array[String]): DataFrame =
    fileFields.asScala.foldLeft(dataSet) { (df, fileField) =>
      if (!features.contains(fileField.getFieldName)) {
        log.info("field {} is not one of the features", fileField.getFieldName)
        df
      } else {
        log.info("field {} is one of the features, begin to fill null to mode of this field", fileField.getFieldName)

        val fieldName = fileField.getFieldName
        val fieldMode = fileField.getMode

        // Dispatch on the column's type; unsupported types are left untouched.
        df.schema(fieldName).dataType match {
          case StringType =>
            val fallback = if (fieldMode != null) fieldMode.toString else "NA"
            df.na.fill(fallback, Array(fieldName))
          case DoubleType =>
            val fallback = if (fieldMode != null) fieldMode.toString.toDouble else 0.0
            df.na.fill(fallback, Array(fieldName))
          case IntegerType =>
            val fallback = if (fieldMode != null) fieldMode.toString.toInt else 0
            df.na.fill(fallback, Array(fieldName))
          case LongType =>
            val fallback = if (fieldMode != null) fieldMode.toString.toLong else 0L
            df.na.fill(fallback, Array(fieldName))
          case _ =>
            df
        }
      }
    }




  /**
   * Converts string-typed feature columns to numeric "&lt;name&gt;_index" columns via
   * StringIndexer.
   *
   * For training data a pipeline of indexers is fitted once and persisted to
   * `featureIndexerPipelineUri`; for inference data the previously saved
   * pipeline is loaded so train/serve encodings stay consistent. The original
   * string columns are dropped afterwards.
   *
   * Fixes over the original: removed the redundant `.map(f => f)`; estimators
   * are now fitted once by the Pipeline instead of individually fitting each
   * StringIndexer and then fitting the pipeline again; the log line prints the
   * feature names instead of the Array's identity hash.
   */
  private def indexFeatures(isTrainData: Boolean,
                            dataSet: DataFrame,
                            features: Array[String],
                            featureIndexerPipelineUri: String,
                            algorithmType: MachineLearningAlgorithmTypeEnum): DataFrame = {
    // Only string-typed features need indexing.
    val stringTypeFeatures = features.filter(feature => dataSet.schema(feature).dataType == StringType)

    if (stringTypeFeatures.isEmpty) {
      dataSet
    } else {
      log.info("待转换字符型特征有" + stringTypeFeatures.length + "个：" + stringTypeFeatures.mkString("[", ",", "]"))

      val pipelineModel: PipelineModel =
        if (isTrainData) {
          val indexers = stringTypeFeatures.map { f =>
            new StringIndexer()
              .setInputCol(f)
              .setOutputCol(f + "_index")
              // "keep": unseen labels at transform time get an extra index instead of failing.
              .setHandleInvalid("keep")
          }
          val model = new Pipeline().setStages(indexers.toArray).fit(dataSet)
          model.write.overwrite().save(featureIndexerPipelineUri)
          log.info("转换模型适配完成，已保存到远端：" + featureIndexerPipelineUri)
          model
        } else {
          val model = PipelineModel.load(featureIndexerPipelineUri)
          log.info("从远程加载完成")
          model
        }

      val indexed = pipelineModel.transform(dataSet)
      log.info("训练集-字符串特征转化为数值类型：")
      indexed.show(10)
      // The string columns are superseded by their *_index counterparts.
      indexed.drop(stringTypeFeatures: _*)
    }
  }


  /**
   * Normalizes the label into the numeric "label_index" column; the raw label
   * column is dropped afterwards.
   *
   * - CLASSIFICATION: the label is cast to String and string-indexed; the fitted
   *   StringIndexerModel is saved to `labelStringIndexerUri` so predictions can
   *   later be mapped back to the original labels.
   * - REGRESSION: the label must already be numeric and is cast to Double to
   *   match the LibSVM label format.
   * - CLUSTERING: unsupervised — the frame is returned untouched.
   *
   * Non-training data is returned unchanged (no label to process).
   *
   * BUG FIX: the original cast the label to String into `localDataSet` but then
   * fitted and transformed the StringIndexer on the pre-cast `dataSet`,
   * silently discarding the cast; both now operate on the casted frame.
   *
   * @author fanyanpeng
   * @date 2023/4/17 23:27
   */
  private def indexLabel(isTrainData: Boolean,
                         dataSet: DataFrame,
                         features: Array[String],
                         label: String,
                         labelStringIndexerUri: String,
                         machineLearningAlgorithmEnum: MachineLearningAlgorithmEnum): DataFrame = {
    dataSet.show(5)
    val algorithmTypeEnum = getMachineLearningAlgorithmTypeEnum(machineLearningAlgorithmEnum)

    if (!isTrainData) { // non-training data: leave the label column alone
      return dataSet
    }

    var localDataSet = dataSet

    algorithmTypeEnum match {
      case MachineLearningAlgorithmTypeEnum.CLASSIFICATION =>
        // Cast to String so StringIndexer handles numeric labels uniformly.
        localDataSet = localDataSet.withColumn(label, col(label).cast(StringType))
        val labelStringIndexer =
          new StringIndexer().setInputCol(label).setOutputCol(indexedLabel).fit(localDataSet)
        labelStringIndexer.write.overwrite().save(labelStringIndexerUri)
        localDataSet = labelStringIndexer.transform(localDataSet)
        log.info("labelStringIndexerUri saved:" + labelStringIndexerUri)

      case MachineLearningAlgorithmTypeEnum.REGRESSION =>
        val labelType = localDataSet.schema(label).dataType
        // Regression labels must be numeric; anything else is a caller error.
        if (labelType != IntegerType && labelType != DoubleType && labelType != LongType) {
          throw AIExternalException.regressionLabelMustBeNumber()
        }
        // Unify as Double to satisfy the LibSVM format.
        localDataSet = localDataSet.withColumn(indexedLabel, col(label).cast(DoubleType))

      case MachineLearningAlgorithmTypeEnum.CLUSTERING =>
        // Unsupervised: nothing to index, nothing to drop.
        return localDataSet
    }

    localDataSet = localDataSet.drop(label)
    localDataSet.show(10)
    localDataSet
  }



  /**
   * Assembles all feature columns (raw numeric ones plus the "*_index" columns
   * produced by string indexing) into a single "features" vector and renames
   * the label column, yielding the two-column (label, features) shape the
   * LibSVM writer expects. Non-training / clustering data gets a constant 0.0
   * placeholder label so the writer still has a label column.
   *
   * Fixes over the original: debug `println` replaced with the class logger;
   * `var assembler` (never reassigned) is now a val.
   *
   * @author fanyanpeng
   * @date 2023/4/24 5:22
   */
  private def assembleToVector(isTrainData: Boolean,
                               dataSet: DataFrame,
                               features: Array[String],
                               algorithmType: MachineLearningAlgorithmTypeEnum): DataFrame = {
    val featuresIndexed = features.map(_ + "_index")
    // Keep every column that is either a raw feature or its indexed counterpart.
    val featureList: Array[String] =
      dataSet.columns.filter(c => features.contains(c) || featuresIndexed.contains(c))
    featureList.foreach(f => log.info("assembling feature column: {}", f))

    // Merge the feature columns into a single vector column.
    val assembler = new VectorAssembler()
      .setInputCols(featureList)
      .setOutputCol("features")

    var localDataSet = assembler.transform(dataSet)

    // No real label for non-training or clustering data: add a 0.0 placeholder.
    if (!isTrainData || isClusteringAlgorithm(algorithmType)) {
      localDataSet = localDataSet.withColumn(indexedLabel, lit(0.0))
    }

    localDataSet = localDataSet
      .select(indexedLabel, "features")
      .toDF("label", "features") // rename to the canonical LibSVM column names

    localDataSet.show()

    localDataSet
  }


  /**
   * Writes the (label, features) frame to `libSvmFileUri` in LibSVM format,
   * overwriting any previous output, and echoes the URI back to the caller.
   */
  private def saveToLibSvmFile(dataSet: DataFrame,
                               libSvmFileUri: String): String = {
    dataSet.write
      .format("libsvm")
      .mode(SaveMode.Overwrite)
      .save(libSvmFileUri)
    libSvmFileUri
  }


  /**
   * Maps an indexed (numeric) column back to its original string values using
   * the StringIndexerModel saved during training.
   *
   * Best effort: on failure the input frame is returned unchanged, preserving
   * the original lenient behavior — but the failure is now logged at WARN with
   * the full stack trace, and only non-fatal throwables are caught (the
   * original caught every Exception and logged it at INFO via string concat,
   * losing the stack trace).
   *
   * @author fanyanpeng
   * @date 2023/4/24 2:17
   * @param dataFrame             frame containing the indexed column
   * @param labelStringIndexerUri URI the fitted StringIndexerModel was saved to
   * @param inName                indexed (numeric) input column
   * @param outName               restored (string) output column
   * @return the frame with `outName` added; `inName` is dropped unless it equals `outName`
   */
  def restoreAColumnUseStringIndexer(dataFrame: DataFrame, labelStringIndexerUri: String, inName: String, outName: String): DataFrame = {
    try {
      val stringIndexerModel: StringIndexerModel = StringIndexerModel.load(labelStringIndexerUri)

      val convert: IndexToString = new IndexToString()
        .setInputCol(inName)
        .setOutputCol(outName)
        .setLabels(stringIndexerModel.labelsArray(0))

      val converted = convert.transform(dataFrame)

      // Drop the numeric source column unless the restore is in place.
      if (inName != outName) converted.drop(col(inName)) else converted
    } catch {
      // NonFatal: never swallow fatal errors (OutOfMemoryError, etc.).
      case NonFatal(e) =>
        log.warn("转换失败", e)
        dataFrame
    }
  }

}
