import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.{RandomForestClassificationModel, RandomForestClassifier}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorAssembler, VectorIndexer}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
// $example off$
import org.apache.spark.sql.SparkSession

object RandomForestClassifier {

  // Build the session directly with the desired master/appName. Creating a
  // bare SparkContext first (as the original code did) makes getOrCreate()
  // reuse that context — with an empty app name — and silently ignore this
  // builder's configuration.
  val spark: SparkSession = SparkSession
    .builder
    .master("local[*]")
    .appName("RandomForestClassifierExample")
    .getOrCreate()

  // Kept for backward compatibility with code that referenced `sc`.
  val sc: SparkContext = spark.sparkContext

  /**
   * Trains a random-forest classifier on the mouse-movement feature CSV,
   * prints the held-out test accuracy, and smoke-tests the trained forest
   * on ten hand-picked feature vectors.
   */
  def main(args: Array[String]): Unit = {
    // Load and parse the data file, converting it to a DataFrame.
    val data = getData()

    // Index labels, adding metadata to the label column.
    // Fit on the whole dataset to include all labels in the index.
    val labelIndexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("indexedLabel")
      .fit(data)

    // Index the assembled feature vector (flags low-cardinality columns as
    // categorical for the tree learner).
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .fit(data)

    // Split the data into training and test sets (20% held out for testing).
    val Array(trainingData, testData) = data.randomSplit(Array(0.8, 0.2))

    // Train a RandomForest model. The fully-qualified name is required here:
    // this enclosing object shadows the imported RandomForestClassifier
    // class, so the bare name would not resolve to the ML estimator.
    val rf = new org.apache.spark.ml.classification.RandomForestClassifier()
      .setLabelCol("indexedLabel")
      .setFeaturesCol("indexedFeatures")
      .setMaxDepth(10)
      .setNumTrees(50)
      .setMaxBins(80)

    // Convert indexed labels back to original labels.
    val labelConverter = new IndexToString()
      .setInputCol("prediction")
      .setOutputCol("predictedLabel")
      .setLabels(labelIndexer.labels)

    // Chain indexers and forest in a Pipeline.
    val pipeline = new Pipeline()
      .setStages(Array(labelIndexer, featureIndexer, rf, labelConverter))

    // Train model. This also runs the indexers.
    val model = pipeline.fit(trainingData)

    // Make predictions on the held-out test set.
    val predictions = model.transform(testData)

    // Select example rows to display.
    predictions.select("predictedLabel", "label", "features").show(5)

    // Compute test accuracy on (prediction, indexed true label).
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("indexedLabel")
      .setPredictionCol("prediction")
      .setMetricName("accuracy")
    val accuracy = evaluator.evaluate(predictions)
    println(s"Accuracy = $accuracy")

    // Stage 2 of the pipeline is the trained forest itself.
    val rfModel = model.stages(2).asInstanceOf[RandomForestClassificationModel]
    // rfModel.save("output/MouseDetectionModel/")

    // Smoke-test predictions on hand-built raw vectors.
    // NOTE(review): each vector carries 28 values (the leading element looks
    // like the "id" column) while the assembler builds 27 features, and
    // rfModel.predict bypasses the VectorIndexer stage the forest was trained
    // on — confirm both before trusting these predictions.
    val samples: Seq[Vector] = Seq(
      Vectors.dense(1.000000,62.000000,974.306452,2795.709677,3.169169,0.960354,4765.000000,466.000000,-7.000000,0.000000,261.000000,7.000000,0.297076,2.414056,0.000000,0.249455,0.499454,0.001158,0.060673,-0.021707,0.000144,0.012018,2608.200000,51.070539,491.826811,88.476996,675.000000,202.000000),
      Vectors.dense(2.000000,80.000000,688.125000,2376.737500,4.504374,0.395121,4399.000000,22.000000,7.000000,0.000000,27.000000,7.000000,0.273807,0.933333,0.000000,0.054609,0.233687,0.000652,0.039316,-0.031217,0.000210,0.014479,8.976316,2.996050,22.011234,12.728481,794.500000,1066.500000),
      Vectors.dense(3.000000,104.000000,1294.365385,2921.000000,5.347466,2.254956,19963.000000,2569.000000,-7.000000,0.000000,141.000000,7.000000,0.652639,3.528311,0.000000,0.460256,0.678422,0.000720,0.111111,-0.070683,0.000597,0.024424,577873.776316,760.180095,335.571322,109.995146,1592.000000,189.000000),
      Vectors.dense(4.000000,3.000000,287.666667,2542.000000,1.056642,0.528321,1051.000000,496.000000,0.000000,0.000000,303.000000,0.000000,0.009259,0.027778,0.000000,0.000257,0.016038,0.000100,0.000100,0.000100,0.000000,0.000000,26319.000000,162.231316,16.333333,0.000000,713.500000,189.000000),
      Vectors.dense(5.000000,12.000000,458.000000,2602.666667,3.584963,2.091228,13048.000000,205.000000,56.000000,0.000000,4674.000000,56.000000,1.163575,4.143670,0.000000,2.184981,1.478168,0.010124,0.080818,-0.010386,0.000715,0.026748,2578033.840909,1605.625685,592.083333,4766.568182,528.000000,436.000000),
      Vectors.dense(6.000000,144.000000,905.902778,2635.076389,6.721805,0.846449,10570.000000,445.000000,0.000000,-13.000000,3456.000000,13.000000,0.359176,0.909761,0.000000,0.046071,0.214642,0.000317,0.028911,-0.025754,0.000101,0.010034,578594.842105,760.654220,20.956828,23.825126,1294.500000,189.000000),
      Vectors.dense(7.000000,13.000000,599.615385,2604.000000,3.415791,1.423246,15514.000000,817.000000,0.000000,-26.000000,66.000000,26.000000,0.532081,1.555556,0.000000,0.364783,0.603973,0.002244,0.015610,-0.012585,0.000058,0.007620,2891876.769231,1700.551901,674.064103,2727.833333,535.000000,189.000000),
      Vectors.dense(8.000000,30.000000,587.966667,2584.466667,3.434823,1.635630,6562.000000,121.000000,7.000000,0.000000,96.000000,7.000000,0.494747,3.370370,0.000000,0.525876,0.725173,0.002447,0.066415,-0.024329,0.000225,0.015003,267768.378947,517.463408,1056.654023,36.713793,661.000000,358.000000),
      Vectors.dense(9.000000,15.000000,669.866667,2626.066667,3.385972,2.083675,2947.000000,43.000000,7.000000,0.000000,243.000000,7.000000,0.935074,3.087070,0.000000,1.104599,1.050999,0.004959,0.052152,-0.014815,0.000366,0.019128,118366.542857,344.044391,2523.266667,873.971429,720.500000,189.000000),
      Vectors.dense(10.000000,90.000000,719.566667,2375.600000,4.832824,0.360659,4642.000000,28.000000,7.000000,0.000000,21.000000,7.000000,0.296318,1.166667,0.000000,0.062621,0.250242,0.000864,0.051282,-0.037809,0.000246,0.015676,23.210526,4.817730,21.630961,13.271036,887.000000,1222.500000)
    )
    samples.foreach(v => println(rfModel.predict(v)))

    spark.stop()
  }

  /**
   * Loads the semicolon-delimited feature CSV, casts every column to its
   * numeric type, and appends an assembled "features" ML vector column.
   *
   * Replaces the original's redundant self-join (VectorAssembler.transform
   * already returns all input columns plus "features") and drops a discarded
   * `asInstanceOf` cast that had no effect.
   *
   * @return the typed DataFrame with a "features" vector column appended
   */
  def getData(): DataFrame = {
    // Columns in file order; "id" and "label" are the only non-feature ones.
    val columnNames = Seq("id", "numRecode", "xmean", "ymean", "xEnt", "yEnt", "MaxTimeInterval",
      "MinTimeInterval", "tailXdiff", "tailYdiff", "tailTdiff", "tailDis_xy",
      "Vmean", "Vmax", "Vmin", "Vvar", "Vstd", "Accmean", "Accmax", "Accmin",
      "Accvar", "Accstd", "LastT20var", "LastT20std", "XdiffVar", "YdiffVar",
      "YTarget", "XTarget", "label")
    val featureCols = columnNames.filterNot(c => c == "id" || c == "label")

    // NOTE(review): hard-coded absolute path — consider making it a parameter.
    val raw = spark.read.format("csv")
      .option("delimiter", ";")
      .load("/Users/bink/pra/python/LearningPython/output/9.csv")
      .toDF(columnNames: _*)

    // Cast id/label to Int and every feature column to Double (the CSV reader
    // produced strings since no schema was supplied).
    val typed = columnNames.foldLeft(raw) { (df, name) =>
      val targetType = if (name == "id" || name == "label") IntegerType else DoubleType
      df.withColumn(name, col(name).cast(targetType))
    }

    // Assemble the 27 feature columns into a single ML vector column.
    val assembler = new VectorAssembler()
      .setInputCols(featureCols.toArray)
      .setOutputCol("features")
    val data = assembler.transform(typed)

    data.printSchema()
    data.show(false)
    data
  }
}