package cn.lagou.test

import org.apache.spark.sql.SparkSession

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row;
import scala.collection.mutable.ArrayOps;
import org.apache.spark.sql.functions._

object Iris_KMeans {

  /** Single-column row type: a feature vector in the column "features",
    * the default input column name expected by Spark ML estimators. */
  case class model_instance(features: org.apache.spark.ml.linalg.Vector)

  /** Trains a K=3 KMeans model on the Iris measurements and prints the
    * per-row cluster assignments, the cluster centers, and the WSSSE cost. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("Iris_KMeans")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("warn")

    // Raw CSV layout: Id,SepalLengthCm,SepalWidthCm,PetalLengthCm,PetalWidthCm,Species
    val fileName = "file:///F:\\lagou\\lagouhomework\\stage_4_module_2\\6.鸢尾花KMeans算法\\Iris_KMeans\\data\\Iris.csv"
    import spark.implicits._

    // Register the CSV as a temp view so it can be inspected via SQL.
    spark.sql(
      """
        |create or replace temporary view iris
        | using csv
        |options (path "data/iris.csv",
        |         header "true",
        |         inferschema "true")
        |""".stripMargin)
    val allData = spark.sql("select * from iris")
    allData.show()

    import org.apache.spark.ml.linalg.Vectors
    import org.apache.spark.ml.clustering.KMeans

    // Build the training DataFrame from the raw text file.
    // BUG FIX: the original filtered fields with the regex alone, but the
    // integer Id column also matches \d*(\.?)\d*, so the row Id leaked into
    // the feature vector and dominated the Euclidean distance. Drop the Id
    // field first, then keep the four numeric measurements (Species is
    // non-numeric and is filtered out).
    val lines: RDD[String] = sc.textFile(fileName)
    val df = lines
      .filter(!_.startsWith("Id,SepalLengthCm,")) // skip the header row
      .map { line =>
        val values = line
          .split(",")
          .drop(1) // discard the Id column
          .filter(_.matches("\\d*(\\.?)\\d*"))
          .map(_.toDouble)
        model_instance(Vectors.dense(values))
      }
      .toDF()

    // K = 3: the Iris data set contains three species.
    val kmeansmodel = new KMeans()
      .setK(3)
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .fit(df)

    // Print each row's feature vector and its predicted cluster index.
    // BUG FIX: added the missing leading space in the message (the original
    // printed e.g. "[5.1,...]is predicted as cluster 0").
    val results = kmeansmodel.transform(df)
    results.collect().foreach { row =>
      println(row(0) + " is predicted as cluster " + row(1))
    }

    kmeansmodel.clusterCenters.foreach { center =>
      println("Clustering Center : " + center)
    }

    // BUG FIX: the original discarded the cost value. Report the within-set
    // sum of squared errors so clustering quality is visible.
    // NOTE(review): computeCost is deprecated since Spark 2.4 — consider
    // kmeansmodel.summary.trainingCost or ClusteringEvaluator on upgrade.
    val wssse = kmeansmodel.computeCost(df)
    println("Within Set Sum of Squared Errors = " + wssse)

    df.show()

    spark.close()
  }
}
