package org.apache.spark.ml.feature

import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.ml.feature.LibSVMParserModel.LibSVMParserModelWriter
import org.apache.spark.ml.param._
import org.apache.spark.ml.util._
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.sql.functions.{struct, udf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._

/**
  * LibSVM-style categorical encoder: [[LibSVMParser]] learns a per-column
  * vocabulary mapping each distinct value to a global feature index, and
  * [[LibSVMParserModel]] rewrites columns into comma-separated index (or
  * "index:value") strings suitable for LibSVM-format output.
  *
  * @author xiongjun
  * @date 2019/12/25 10:10
  */
/**
 * Shared params for [[LibSVMParser]] and [[LibSVMParserModel]].
 */
trait LibSVMParserBase extends Params {

  /** Names of the columns to encode. */
  final val inputCols = new StringArrayParam(this, "inputCols", "input columns name")

  def getInputCols: Array[String] = $(inputCols)

  /** Names of the encoded output columns, one per input column. */
  final val outputCols = new StringArrayParam(this, "outputCols", "output columns name")

  def getOutputCols: Array[String] = $(outputCols)

  /** If true, emit "index:value" pairs; otherwise emit bare feature indices. */
  final val returnValue = new BooleanParam(this, "returnValue", "is return value?")
  setDefault(returnValue, false)

  def isReturnValue: Boolean = $(returnValue)

  def setInputCols(value: Array[String]): this.type = set(inputCols, value)

  def setOutputCols(value: Array[String]): this.type = set(outputCols, value)

  def setReturnValue(value: Boolean): this.type = set(returnValue, value)

  /**
   * Validates the incoming schema and appends one StringType field per output column.
   *
   * @throws IllegalArgumentException if input/output column counts differ, an input
   *                                  column is missing, or an output column already exists.
   */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    val inputColNames = $(inputCols)
    val outputColNames = $(outputCols)
    // transform() pairs the two arrays element-wise; a length mismatch would
    // silently drop columns, so fail fast here instead.
    require(inputColNames.length == outputColNames.length,
      s"The number of input columns (${inputColNames.length}) must match " +
        s"the number of output columns (${outputColNames.length}).")
    inputColNames.foreach { colName =>
      require(schema.fieldNames.contains(colName),
        s"Input column $colName does not exist.")
    }
    // Name the offending column so the failure is actionable.
    outputColNames.foreach { colName =>
      require(!schema.fieldNames.contains(colName),
        s"Output column $colName already exists.")
    }
    val outputFields = schema.fields ++ outputColNames.map(StructField(_, StringType))
    StructType(outputFields)
  }
}
/**
 * Estimator that learns, for each input column, a vocabulary mapping every
 * distinct value to a global (cross-column) feature index.
 */
class LibSVMParser(override val uid: String)
  extends Estimator[LibSVMParserModel] with LibSVMParserBase with DefaultParamsWritable {

  def this() = this(Identifiable.randomUID("libsvm_parser"))

  /**
   * Scans each input column and builds its dictionary of distinct values.
   * Numeric columns are de-duplicated and sorted numerically before being
   * stringified; any other column is treated as a comma-separated multi-value
   * string. Indices are assigned contiguously across columns, and when
   * `returnValue` is set one extra slot per column is reserved for unseen values.
   */
  override def fit(dataset: Dataset[_]): LibSVMParserModel = {
    transformSchema(dataset.schema, logging = true)

    // Starting global feature index of every input column.
    var columnStarts = Map.empty[String, Int]
    // Per-column dictionary: distinct value -> global feature index.
    var dictionaries = Map.empty[String, Map[String, Int]]
    // Running count of feature indices handed out so far.
    var nextIndex = 0

    for (inputCol <- $(inputCols)) {
      val nonNullRows = dataset.na.drop(Array(inputCol)).select(inputCol).rdd
      val distinctValues = dataset.schema(inputCol).dataType match {
        case IntegerType =>
          nonNullRows.map(_.get(0).toString.toInt).distinct().collect().sorted.map(_.toString)
        case DoubleType | FloatType =>
          nonNullRows.map(_.get(0).toString.toDouble).distinct().collect().sorted.map(_.toString)
        case _ =>
          // String-like columns may carry several values separated by commas.
          nonNullRows.flatMap(_.get(0).toString.split(",")).distinct().collect()
      }
      columnStarts += (inputCol -> nextIndex)
      dictionaries += (inputCol ->
        distinctValues.zipWithIndex.map { case (v, i) => v -> (nextIndex + i) }.toMap)
      // Reserve one additional index for out-of-vocabulary values when
      // "index:value" output is requested.
      nextIndex += distinctValues.length + (if ($(returnValue)) 1 else 0)
    }
    copyValues(new LibSVMParserModel(uid, columnStarts, dictionaries))
  }

  override def copy(extra: ParamMap): Estimator[LibSVMParserModel] = defaultCopy(extra)

  override def transformSchema(schema: StructType): StructType = validateAndTransformSchema(schema)
}
/** Companion providing [[MLReadable]] support for [[LibSVMParser]]. */
object LibSVMParser extends DefaultParamsReadable[LibSVMParser] {
  // Overridden only to narrow the static return type for callers.
  override def load(path: String): LibSVMParser = super.load(path)
}

/**
 * Model produced by [[LibSVMParser]].
 *
 * @param start      starting global feature index of each input column
 * @param vocabulary per-column map from distinct value to its GLOBAL feature
 *                   index (the column offset is already included by fit)
 */
class LibSVMParserModel(
                         override val uid: String,
                         val start: Map[String, Int],
                         val vocabulary: Map[String, Map[String,Int]])
  extends Model[LibSVMParserModel] with LibSVMParserBase with MLWritable{

  def this(start: Map[String,Int], vocabulary: Map[String, Map[String,Int]]) =
    this(Identifiable.randomUID("libsvm_parser"),start,vocabulary)

  override def copy(extra: ParamMap): LibSVMParserModel = {
    val copied = new LibSVMParserModel(uid, start, vocabulary)
    copyValues(copied, extra).setParent(parent)
  }

  override def write: MLWriter = new LibSVMParserModelWriter(this)

  /**
   * Appends one output column per input column containing the comma-joined
   * encodings of that column's (possibly comma-separated) values.
   *
   * With `returnValue` set, each token becomes "index:value" and unseen values
   * map to the per-column reserved slot with the sentinel value "-1"; otherwise
   * each token becomes the bare index, with -1 for unseen values.
   */
  override def transform(dataset: Dataset[_]): DataFrame = {
    transformSchema(dataset.schema, logging = true)

    val func = udf { row: Row =>
      // The udf receives struct(col(inputCol)), so the single field name
      // identifies which column's dictionary to use.
      val fieldName = row.schema.fieldNames.head
      val startVal = start(fieldName)
      val dict = vocabulary(fieldName)
      val values = row.getAs[Any](fieldName).toString.split(",")
      values.map { v =>
        if ($(returnValue)) {
          if (dict.contains(v)) {
            // BUGFIX: dict already stores the global index (fit bases each
            // column's range at its start offset); the old code added startVal
            // a second time, corrupting every column after the first.
            dict(v) + ":" + v
          } else {
            // BUGFIX: route unseen values to the extra slot fit reserves at
            // startVal + dict.size; the old startVal + dict.size - 1 collided
            // with the last in-vocabulary index.
            (startVal + dict.size) + ":" + "-1"
          }
        } else {
          if (dict.contains(v)) dict(v).toString else "-1"
        }
      }.mkString(",")
    }

    var result = dataset.toDF()
    $(inputCols).zip($(outputCols)).foreach { case (inputCol, outputCol) =>
      result = result.withColumn(outputCol, func(struct(col(inputCol))))
    }
    result
  }

  override def transformSchema(schema: StructType): StructType = validateAndTransformSchema(schema)
}

/** Persistence support (save/load) for [[LibSVMParserModel]]. */
object LibSVMParserModel extends MLReadable[LibSVMParserModel] {

  private[LibSVMParserModel]
  class LibSVMParserModelWriter(instance: LibSVMParserModel) extends MLWriter {

    /** Serializable payload holding the model's learned state. */
    private case class Data(start: Map[String, Int], vocabulary: Map[String, Map[String, Int]])

    override protected def saveImpl(path: String): Unit = {
      // Params are written to the standard metadata location.
      DefaultParamsWriter.saveMetadata(instance, path, sc)
      // Learned state goes into a single-row parquet file under <path>/data.
      val dataPath = new Path(path, "data").toString
      val payload = Data(instance.start, instance.vocabulary)
      sparkSession.createDataFrame(Seq(payload)).repartition(1).write.parquet(dataPath)
    }
  }

  private class LibSVMParserModelReader extends MLReader[LibSVMParserModel] {
    private val className = classOf[LibSVMParserModel].getName

    override def load(path: String): LibSVMParserModel = {
      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val dataPath = new Path(path, "data").toString
      val row = sparkSession.read.parquet(dataPath)
        .select("start", "vocabulary")
        .head()
      val model = new LibSVMParserModel(
        metadata.uid,
        row.getAs[Map[String, Int]]("start"),
        row.getAs[Map[String, Map[String, Int]]]("vocabulary"))
      // Restore Param values saved in the metadata file.
      DefaultParamsReader.getAndSetParams(model, metadata)
      model
    }
  }

  override def read: MLReader[LibSVMParserModel] = new LibSVMParserModelReader

  override def load(path: String): LibSVMParserModel = super.load(path)
}