package com.feidee.fdspark.transformer.intermediate

import org.apache.spark.ml.Transformer
import org.apache.spark.ml.param.{Param, ParamMap, StringArrayParam}
import org.apache.spark.ml.util.{DefaultParamsWritable, Identifiable}
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.types.StructType

/**
  * @Author: xiongjun
  * @Date: 2019/5/21 10:07
  */
/**
  * Bridge stage between a Spark ML pipeline and TensorFlow: writes the incoming
  * dataset to HDFS in TFRecord ("Example") format via the `tfrecords` data source,
  * then passes the dataset through unchanged so downstream stages can keep running.
  *
  * Note: the class name preserves the historical spelling "Tensoflow" because
  * persisted pipelines reference it; do not rename without a migration path.
  */
class SparkTensoflowConnector(override val uid: String) extends Transformer with DefaultParamsWritable {

  /** HDFS path the upstream data originates from (only validated here, never read). */
  final val inputPath = new Param[String](this, "inputPath", "input hdfs path")

  /** HDFS path the TFRecord output is written to. */
  final val outputPath = new Param[String](this, "outputPath", "output hdfs path")

  def this() = this(Identifiable.randomUID("sparkTensoflowConnector"))

  def setInputPath(value: String): this.type = set(inputPath, value)

  /** Returns the configured input path. */
  def getInputPath: String = $(inputPath)

  /** Legacy misnamed accessor kept for source compatibility; returns the input path. */
  def getDelimiter: String = $(inputPath)

  def setOutputPath(value: String): this.type = set(outputPath, value)

  /** Returns the configured output path. */
  def getOutputPath: String = $(outputPath)

  /** Legacy misnamed accessor kept for source compatibility; returns the output path, not a column. */
  def getOutputCol: String = $(outputPath)

  /**
    * Writes `dataset` as TFRecords to [[outputPath]] and returns the dataset as a
    * DataFrame, unmodified. Validates params before performing the side-effecting write.
    */
  override def transform(dataset: Dataset[_]): DataFrame = {
    // Validate params first so a misconfiguration fails fast, before any HDFS write.
    transformSchema(dataset.schema)
    dataset.write.format("tfrecords").option("recordType", "Example").save($(outputPath))
    dataset.toDF()
  }

  override def copy(extra: ParamMap): Transformer = defaultCopy(extra)

  /** Schema passes through untouched; only validates that both paths are set and non-empty. */
  override def transformSchema(schema: StructType): StructType = {
    // The null check must precede any method call on the value, otherwise a null
    // param would throw NPE here instead of the intended IllegalArgumentException.
    require($(inputPath) != null && $(inputPath).nonEmpty, "input hdfs path must not be null or empty")
    require($(outputPath) != null && $(outputPath).nonEmpty, "output hdfs path must not be null or empty")
    schema
  }
}
