package com.fudian.spark_platform.Clustering

import com.fudian.spark_platform.MLClusteringConf
import org.apache.spark.ml.feature.IDF
import org.apache.spark.sql.DataFrame

import scala.collection.mutable

/**
  * Copyright FuMan AI DataDimming.co SHANGHAI, RPC
  * File can't be used to License
  * Created by XiaoJun on 2018/3/13.
  */
class TF_IDFClustering {

    // Input documents; each row must carry the term-frequency vector column
    // named by the configured "inputCol". Null until the two-arg constructor
    // (or a direct assignment) sets it.
    var df:DataFrame = null
    // Platform configuration; its "MLParams" map supplies minDoc, inputCol
    // and outputCol (see clustering()).
    var config:MLClusteringConf = null

    /**
      * Convenience constructor wiring the input DataFrame and configuration.
      *
      * @param df     documents to rescale (must contain the configured input column)
      * @param config platform config carrying the "MLParams" map
      */
    def this(df:DataFrame,config:MLClusteringConf) = {
        this()
        this.df = df
        this.config = config
    }

    /**
      * Fits an IDF model on `df` and returns `df` extended with the IDF-rescaled
      * vector column named by the configured `outputCol`.
      *
      * Expected "MLParams" entries: "minDoc" (Int, minimum document frequency),
      * "inputCol" (String, TF vector column), "outputCol" (String, result column).
      *
      * @return the transformed DataFrame (original columns plus `outputCol`)
      * @throws IllegalArgumentException if `df` or `config` was never set
      */
    def clustering() : DataFrame = {
        // Guard: both fields are null-initialised vars, so a default-constructed
        // instance would otherwise NPE deep inside Spark with no useful message.
        require(this.df != null, "TF_IDFClustering: df must be set before clustering()")
        require(this.config != null, "TF_IDFClustering: config must be set before clustering()")

        val paramsMap = this.config.mLConfig.mLConfig("MLParams").asInstanceOf[mutable.Map[String,Any]]
        val minDoc = paramsMap("minDoc").asInstanceOf[Int]
        val inputCol = paramsMap("inputCol").asInstanceOf[String]
        val outputCol = paramsMap("outputCol").asInstanceOf[String]

        val idf = new IDF()
            .setMinDocFreq(minDoc)
            .setInputCol(inputCol)
            .setOutputCol(outputCol)
        val idfModel = idf.fit(this.df)

        val rescaledData = idfModel.transform(this.df)
        // Debug preview. Bug fix: select the configured output column instead of
        // the hard-coded "features" name, which threw an AnalysisException
        // whenever outputCol was set to anything else.
        // NOTE(review): still assumes a "label" column exists in df — confirm
        // against upstream pipeline stages.
        rescaledData.select("label", outputCol).show()
        rescaledData
    }

}
