package LogServer.spark.mllib

import LogServer.constants.{PropertiesAssemblyJar, PropertiesStreaming}
import LogServer.spark.sql.SqlContext
import LogServer.spark.streaming.SparkStreaming
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFFirstValue
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Created by root on 17-3-2.
  */
object Kmeans {

  /**
    * Entry point: trains a k-means model from the "vmhost" collection and
    * prints the predicted cluster for a few sample (cpu, mem_rate) points.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("LogServer").setMaster("spark://master:7077").
      set("spark.executor.memory", "500m").setJars(PropertiesAssemblyJar.getAssemblyJars())
    val timespan = PropertiesStreaming.getTimespan()
    // NOTE(review): the StreamingContext is only used as a SparkContext holder
    // here (it is never started) — a plain SparkContext would suffice.
    val ssc = new StreamingContext(conf, Seconds(timespan))
    try {
      getModel(ssc.sparkContext) match {
        case Some(m) =>
          // Probe the model with a few representative (cpu, mem_rate) points.
          val samples = Seq((0.0, 0.0), (100.0, 100.0), (59.0, 34.0))
          for ((d1, d2) <- samples) {
            println(s"Prediction of ($d1,$d2)-->${predict(m, d1, d2)}")
          }
        case None =>
          // No training data: report it instead of exiting silently.
          println("Collection 'vmhost' is empty; no k-means model was built.")
      }
    } finally {
      // Always release cluster resources so the driver can exit cleanly.
      ssc.sparkContext.stop()
    }
  }

  /**
    * Loads the "vmhost" collection and trains a k-means model (k = 3,
    * max 100 iterations, fixed seed) on the (vm_cpu, vm_mem_rate) columns.
    *
    * @param sc active SparkContext used to load the collection
    * @return Some(model) when training data was available, None otherwise
    */
  def getModel(sc: SparkContext): Option[KMeansModel] = {
    val df = SqlContext.loadCollection(sc, "vmhost")
    // "[]" is treated as "nothing was loaded".
    // NOTE(review): comparing toString output is fragile — prefer an explicit
    // emptiness/schema check on whatever type loadCollection returns.
    if (df.toString().equals("[]")) {
      None
    } else {
      println("df = " + df.toString())
      // Keep only the attributes the model is trained on.
      val filtered = df.select("vm_cpu", "vm_mem_rate")
      println(filtered)
      val parsedData = filtered.rdd.map { row =>
        Vectors.dense(
          row.getAs("vm_cpu").toString.toDouble,
          row.getAs("vm_mem_rate").toString.toDouble)
      }
      // KMeans is iterative: cache the training set so the source is not
      // re-evaluated on every iteration.
      parsedData.cache()

      val kmeans = new KMeans().setK(3).setMaxIterations(100).setSeed(1)
      val model = kmeans.run(parsedData)

      println(parsedData.map(v => v.toString + " belong to cluster :" +
        model.predict(v)).collect().mkString("\n"))

      // Within-set sum of squared errors: lower means tighter clusters.
      val wssse = model.computeCost(parsedData)
      println("WithinSet Sum of Squared Errors = " + wssse)

      println("ClusterCenters:")
      model.clusterCenters.foreach(center => println(" " + center))

      parsedData.unpersist()
      Some(model)
    }
  }

  /**
    * Predicts the cluster index for a single two-dimensional observation.
    *
    * @param model trained k-means model
    * @param d1    first feature (vm_cpu)
    * @param d2    second feature (vm_mem_rate)
    * @return index of the nearest cluster center
    */
  def predict(model: KMeansModel, d1: Double, d2: Double): Int =
    model.predict(Vectors.dense(d1, d2))
}
