package org.apache.spark.ml.feature

import org.apache.hadoop.fs.Path
import org.apache.spark.ml.feature.DictMapperModel.DictMapperModelWriter
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol}
import org.apache.spark.ml.param.{Param, ParamMap, Params}
import org.apache.spark.ml.util._
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Dataset}

/**
  * @author xiongjun
  * @since 2019/8/26 15:01
  */
private[feature] trait DictMapperBase extends Params with HasInputCol with HasOutputCol {

  /**
    * Validates that the input column exists and is StringType and that the
    * output column is not already present, then returns the input schema with
    * the output column (an array of strings) appended.
    *
    * @param schema schema of the dataset being fitted/transformed
    * @return schema with the output column appended
    */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    val inputColName = $(inputCol)
    val inputDataType = schema(inputColName).dataType
    // Fixed: the two concatenated fragments previously rendered as
    // "...must be string typebut got..." — a space was missing.
    require(inputDataType == StringType,
      s"The input column $inputColName must be string type " +
        s"but got $inputDataType.")
    val inputFields = schema.fields
    val outputColName = $(outputCol)
    require(inputFields.forall(_.name != outputColName),
      s"Output column $outputColName already exists.")
    // Matched tokens are emitted as an array of strings.
    val outputField = StructField(outputColName, ArrayType(StringType))
    StructType(inputFields :+ outputField)
  }
}
/**
  * Estimator that scans a string column with a user-supplied regular
  * expression and collects the distinct set of matched tokens into a
  * [[DictMapperModel]] dictionary.
  */
class DictMapper(override val uid: String) extends Estimator[DictMapperModel]
  with DictMapperBase with DefaultParamsWritable {

  def this() = this(Identifiable.randomUID("dictMapper"))

  /** Regular expression used to extract candidate tokens from the input column. */
  final val regex = new Param[String](this, "regex", "match regular expression")

  def setRegex(value: String): this.type = set(regex, value)

  def setInputCol(value: String): this.type = set(inputCol, value)

  def setOutputCol(value: String): this.type = set(outputCol, value)

  override def fit(dataset: Dataset[_]): DictMapperModel = {
    transformSchema(dataset.schema, logging = true)
    // Bug fix: printSchema() returns Unit (it prints to stdout), so the old
    // interpolation logged "dataset schema is ()". treeString captures the
    // schema as text so it actually ends up in the log.
    logInfo(s"dataset schema is\n${dataset.schema.treeString}")
    val pattern = $(regex).r
    // Deduplicate on the executors (distinct before collect) so only the
    // unique tokens are shipped to the driver, instead of every raw match.
    val data = dataset.select(col($(inputCol)).cast(StringType)).rdd
      .flatMap { row =>
        val appname = row.getString(0)
        // Guard against null cells, which would otherwise NPE in findAllIn.
        if (appname == null) Iterator.empty else pattern.findAllIn(appname)
      }
      .distinct()
      .collect()

    logInfo(s"data: ${data.mkString(",")}")
    copyValues(new DictMapperModel(uid, $(regex), data).setParent(this))
  }

  override def copy(extra: ParamMap): Estimator[DictMapperModel] = defaultCopy(extra)

  override def transformSchema(schema: StructType): StructType = validateAndTransformSchema(schema)
}

/** Companion object providing [[MLReadable]]-style loading for [[DictMapper]]. */
object DictMapper extends DefaultParamsReadable[DictMapper] {
  // Overridden only to narrow the static return type for callers.
  override def load(path: String): DictMapper = super.load(path)
}


/**
  * Model produced by [[DictMapper]]: re-applies the regular expression to the
  * input column and keeps only those matches that appear in the learned
  * dictionary (`charset`).
  *
  * @param regex   regular expression used to extract candidate tokens
  * @param charset dictionary of tokens learned during fitting
  */
class DictMapperModel(override val uid: String, val regex: String, val charset: Array[String])
  extends Model[DictMapperModel] with DictMapperBase with MLWritable {

  def this(regex: String, charset: Array[String]) =
    this(Identifiable.randomUID("dictMapper"), regex, charset)

  override def copy(extra: ParamMap): DictMapperModel = {
    val copied = new DictMapperModel(uid, regex, charset)
    copyValues(copied, extra).setParent(parent)
  }

  override def write: MLWriter = new DictMapperModelWriter(this)

  override def transform(dataset: Dataset[_]): DataFrame = {
    require(dataset.schema.fieldNames.contains($(inputCol)),
      s"Input column ${$(inputCol)} does not exist")
    transformSchema(dataset.schema, logging = true)
    logInfo(s"charset length is : ${charset.length}")
    // Perf fix: compile the regex and build the lookup set once on the driver
    // instead of recompiling per row inside the UDF, and replace the O(n)
    // Array.contains scan per match with an O(1) Set lookup.
    val pattern = regex.r
    val dictionary = charset.toSet
    val extract = udf { appname: String =>
      // Guard against null cells, which would otherwise NPE in findAllIn.
      if (appname == null) Array.empty[String]
      else pattern.findAllIn(appname).filter(dictionary.contains).toArray
    }
    dataset.withColumn($(outputCol), extract(col($(inputCol))))
  }

  override def transformSchema(schema: StructType): StructType =
    validateAndTransformSchema(schema)
}

/** Companion object handling persistence (save/load) for [[DictMapperModel]]. */
object DictMapperModel extends MLReadable[DictMapperModel] {

  /** Writer that persists param metadata plus the regex/dictionary payload. */
  private[DictMapperModel]
  class DictMapperModelWriter(instance: DictMapperModel) extends MLWriter {
    // Parquet row layout for the model payload.
    private case class Data(regex: String, charset: Array[String])

    override protected def saveImpl(path: String): Unit = {
      // Standard ML metadata (uid, params, class name) written next to the payload.
      DefaultParamsWriter.saveMetadata(instance, path, sc)
      val payload = Seq(Data(instance.regex, instance.charset))
      val dataPath = new Path(path, "data").toString
      sparkSession.createDataFrame(payload).repartition(1).write.parquet(dataPath)
    }
  }

  /** Reader that restores a [[DictMapperModel]] written by the writer above. */
  private class DictMapperModelReader extends MLReader[DictMapperModel] {
    private val className = classOf[DictMapperModel].getName

    override def load(path: String): DictMapperModel = {
      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val dataPath = new Path(path, "data").toString
      val row = sparkSession.read.parquet(dataPath).select("regex", "charset").head()
      val model = new DictMapperModel(
        metadata.uid,
        row.getString(0),
        row.getAs[Seq[String]](1).toArray)
      DefaultParamsReader.getAndSetParams(model, metadata)
      model
    }
  }

  override def read: MLReader[DictMapperModel] = new DictMapperModelReader

  override def load(path: String): DictMapperModel = super.load(path)
}
