package cn.itcast.czxy.BD18.ml

import cn.itcast.czxy.BD18.bean.BaseMode
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.{MinMaxScaler, MinMaxScalerModel, VectorAssembler}
import org.apache.spark.sql.{DataFrame, functions}
import org.apache.spark.sql.functions._
import org.apache.spark.storage.StorageLevel

import scala.collection.immutable

object RFEMode1 extends BaseMode {
  override def setAppName: String = "RFEMode1"

  override def setLeven4Id: Int = 139

  /**
   * Builds RFE (Recency / page-view Frequency / Engagement) features per user
   * from the web-log rows in `hbaseDF`, min-max scales them, and trains KMeans
   * for k = 2..15, printing the SSE (within-cluster cost) of each run so an
   * "elbow" value of k can be chosen manually.
   *
   * NOTE(review): the actual tagging join against `leve5` is still commented
   * out below, so this method currently returns null — confirm the caller
   * (BaseMode.exec) tolerates a null result before shipping.
   *
   * @param leve5   level-5 tag rules (id, rule); unused while the tagging
   *                pipeline below is disabled
   * @param hbaseDF raw log rows with columns global_user_id, log_time, loc_url
   * @return null (work in progress; see the commented-out tagging code)
   */
  override def getNewTag(leve5: DataFrame, hbaseDF: DataFrame): DataFrame = {
    // functions._ is already imported at file level; only implicits are needed
    // here for the 'symbol column syntax.
    import spark.implicits._

    // Per-user aggregates:
    //   log_time -> days since the user's latest visit, shifted by -300
    //               (presumably compensates for the age of the sample data —
    //               TODO confirm against the dataset)
    //   pl       -> total page views (frequency)
    //   hdd      -> distinct pages viewed (engagement)
    val userRFEDF = hbaseDF.repartition(4).groupBy("global_user_id")
      .agg(
        (datediff(current_timestamp(), max("log_time")) - 300).as("log_time"),
        count("loc_url").as("pl"),
        countDistinct("loc_url").as("hdd")
      ).persist(StorageLevel.MEMORY_AND_DISK)

    // Bucket recency into a 1..5 score (smaller gap => higher score).
    // val instead of var: the column expression is never reassigned.
    val recencyScore = when(col("log_time").between(0, 15), 5)
      .when(col("log_time").between(16, 30), 4)
      .when(col("log_time").between(31, 45), 3)
      .when(col("log_time").between(46, 60), 2)
      .when(col("log_time").gt(60), 1)
      .as("log_time")

    val zh = userRFEDF
      .select('global_user_id, recencyScore, 'pl, 'hdd)
      .persist(StorageLevel.MEMORY_AND_DISK)
//zh.show()
    // Assemble the three feature columns into a single vector column.
    val userRFEDFVector = new VectorAssembler()
      .setInputCols(Array("log_time", "pl", "hdd"))
      .setOutputCol("feature")
      .transform(zh)
      .persist(StorageLevel.MEMORY_AND_DISK)

    // Scale every feature into [0, 1] so no dimension dominates the
    // Euclidean distance used by KMeans.
    val scaler: MinMaxScalerModel = new MinMaxScaler()
      .setInputCol("feature")
      .setOutputCol("featureMiaS")
      .fit(userRFEDFVector)
    val userRFEDFVectorModel: DataFrame =
      scaler.transform(userRFEDFVector).persist(StorageLevel.MEMORY_AND_DISK)

    // Elbow method: train KMeans for each k and print the SSE of every run.
    // mkString("", ",", ",") reproduces the original output format exactly,
    // including the trailing comma.
    val sse = (2 to 15).map { k =>
      val km: KMeansModel = new KMeans()
        .setK(k)
        .setMaxIter(10)
        .setFeaturesCol("featureMiaS")
        .setPredictionCol("featureOut")
        .fit(userRFEDFVectorModel)
      km.computeCost(userRFEDFVectorModel)
    }.mkString("", ",", ",")
    println(sse)

    // --- tagging pipeline (disabled until k is chosen from the elbow) ------
//    val km: KMeansModel = new KMeans().setK(5).setMaxIter(10).setFeaturesCol("featureMiaS").setPredictionCol("featureOut").fit(userRFEDFVectorModel)
//    val userRFEDFVectorModelKm = km.transform(userRFEDFVectorModel)
    //    val clusterCenters: immutable.IndexedSeq[(Int, Double)] = for (i <- km.clusterCenters.indices) yield (i,km.clusterCenters(i).toArray.sum)
//    val clusterCentersSortBy = clusterCenters.sortBy(-_._2)
//    val clusterCentersSortByIndices = for (elem <- clusterCentersSortBy.indices) yield (clusterCentersSortBy(elem)._1,elem)
////    clusterCentersSortByIndices.foreach(println)
//    val pxhoukm = clusterCentersSortByIndices.toDF("featureOut","rule")
//
//    val leve5pxgg: DataFrame = pxhoukm.join(leve5,pxhoukm("rule")===leve5("rule")).select(leve5("id").as("tagsId"),pxhoukm("featureOut"))
//    //    leve5pxgg.show()
//    val jg: DataFrame = leve5pxgg.join(userRFEDFVectorModelKm,leve5pxgg("featureOut")===userRFEDFVectorModelKm("featureOut")).select('global_user_id.as("userId"),'tagsId)
////    jg.show()
//    jg
    null
  }

  /** Entry point: delegates to the BaseMode template's exec(). */
  def main(args: Array[String]): Unit = {
    exec()
  }
}
