package com.fwmagic.spark.xgboost

import ml.dmlc.xgboost4j.scala.spark.{XGBoostClassificationModel, XGBoostClassifier}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.{StringIndexer, StringIndexerModel, VectorAssembler}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ArrayBuffer
 
object SparkTraining2 {

  /**
   * Entry point: trains a binary XGBoost classifier on one CSV file and
   * scores a second CSV file with the fitted model.
   *
   * Optional CLI arguments (defaults keep the original hard-coded paths):
   *   args(0) — training CSV path
   *   args(1) — test CSV path
   *
   * Both files are expected to be header-less, with the raw label in the
   * first column ("_c0") and the features in all remaining columns.
   */
  def main(args: Array[String]): Unit = {
    // Uncomment to silence Spark's verbose logging during local runs.
    // Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)

    val trainPath = if (args.length > 0) args(0) else "/Users/pboc_train.csv"
    val testPath  = if (args.length > 1) args(1) else "/Users/pboc_test.csv"

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("xgboost_spark_demo")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    try {
      // Step 1: read the raw CSV data.
      val dfTrain = readCsv(spark, trainPath)
      val dfTest  = readCsv(spark, testPath)

      // Step 2: fit the label indexer ONCE, on the training data only, and
      // reuse it for the test split.  StringIndexer orders labels by
      // frequency, so fitting a separate indexer per split (as the original
      // code did) can assign 0/1 inconsistently between train and test.
      val labelIndexer: StringIndexerModel = new StringIndexer()
        .setInputCol("_c0")
        .setOutputCol("class")
        .fit(dfTrain)

      val xgbTrain = processData(dfTrain, Some(labelIndexer))
      val xgbTest  = processData(dfTest, Some(labelIndexer))

      val xgbParam = Map(
        "eta" -> 0.1f,
        "objective" -> "binary:logistic",
        "num_round" -> 100,
        "num_workers" -> 4
      )
      val xgbClassifier = new XGBoostClassifier(xgbParam)
        .setEvalMetric("auc")
        .setMaxDepth(5)
        .setFeaturesCol("features")
        .setLabelCol("class")

      println("Start Training ......")
      val xgbClassificationModel: XGBoostClassificationModel = xgbClassifier.fit(xgbTrain)
      println("End Training ......")

      println("Predicting ...")
      val results = xgbClassificationModel.transform(xgbTest)
      results.show()
    } finally {
      // Release local Spark resources even if training or scoring fails.
      spark.stop()
    }
  }

  /** Reads a header-less CSV file with column types inferred from content. */
  private def readCsv(spark: SparkSession, path: String): DataFrame =
    spark.read.format("csv")
      .option("header", "false")
      .option("inferSchema", "true")
      .load(path)

  /**
   * Converts a raw CSV DataFrame (label in "_c0", features in every other
   * column) into the ("features", "class") shape expected by XGBoost.
   *
   * @param df_data raw input frame
   * @param indexer optional pre-fitted label indexer; when None, a new one is
   *                fitted on df_data (the original single-argument behavior)
   * @return a DataFrame with a "features" vector column and a numeric
   *         "class" label column
   */
  def processData(df_data: DataFrame, indexer: Option[StringIndexerModel] = None): DataFrame = {
    // Every column except the first ("_c0", the label) is a feature.
    val featureColumns: Array[String] = df_data.columns.drop(1)

    val labelIndexer = indexer.getOrElse(
      new StringIndexer()
        .setInputCol("_c0")
        .setOutputCol("class")
        .fit(df_data)
    )
    val labelTransformed = labelIndexer.transform(df_data).drop("_c0")

    // Assemble all feature columns into a single vector column.
    val assembler = new VectorAssembler()
      .setInputCols(featureColumns)
      .setOutputCol("features")

    assembler.transform(labelTransformed).select("features", "class")
  }

}
