package userchurn

import org.apache.spark.ml.{Pipeline, PipelineModel, PipelineStage}
import org.apache.spark.ml.classification.{RandomForestClassificationModel, RandomForestClassifier}
import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, MulticlassClassificationEvaluator}
import org.apache.spark.ml.feature.{StringIndexer, VectorAssembler}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/**
 * Trains, evaluates and applies a random-forest user-churn classifier over
 * Gitee activity features, and prints per-feature correlations with the
 * churn label to support feature selection.
 */
object RandomForest {
  // Where the fitted pipeline is persisted / loaded from.
  // NOTE(review): "frMode" looks like a typo for "rfModel", but renaming would
  // orphan any model already saved at this path, so it is kept as-is.
  val modelPath = "C:\\Users\\Administrator\\Desktop\\用户流失预测\\model\\frMode"

  // Required on Windows so Spark can locate winutils.exe.
  System.setProperty("hadoop.home.dir", "C:\\hadoop-2.8.1")

  // Lazy so the session is only built on first use, strictly after the
  // hadoop.home.dir property above has been set during object init.
  // App name fixed: this job is a random forest, not K-Means.
  lazy val spark: SparkSession = SparkSession.builder.appName("RandomForest").master("local").getOrCreate

  def main(args: Array[String]): Unit = {
    userFeaturesCorrelations()
    //    train()
    //    predict()
    spark.stop() // release the local Spark context before exiting
  }

  /**
   * Prints the Pearson correlation of each candidate feature against the
   * churn label ("tag"), sorted ascending, to guide feature selection.
   */
  private def userFeaturesCorrelations(): Unit = {
    val user = "C:\\Users\\Administrator\\Desktop\\用户流失预测\\user_tag_new_1.csv"
    // Candidate feature columns; the trailing "tag" is the label.
    val arr = Array(/*"prs", "issues", "comments", "stars", "watchs", "forks", */ "pr_merged_percent", "ci_fail_percent",
      "issue_closed_percent", "issue_rejected_percent", /*"last_contribute_to_now",*/ "sig_cluster", "pr_open_days_avg", "issue_open_days_avg", "tag")

    // Cast every column to double and take the absolute value so negative
    // sentinel values do not distort the correlations.
    val userDS = spark.read.option("header", value = true).csv(user)
      .select(arr.map(e => abs(col(e).cast("double")).as(e)): _*)

    import spark.implicits._

    // Correlate each feature (all but the trailing "tag") with the label
    // and show the full ranking without truncation.
    val correlations = arr.dropRight(1).map(field => (field, userDS.stat.corr("tag", field)))
    spark.sparkContext.parallelize(correlations).toDF("field_tag", "corr").sort("corr").show(100, false)
  }

  /**
   * Trains a random-forest churn classifier on a 70/30 split, persists the
   * whole pipeline to [[modelPath]], and prints accuracy and AUC on the
   * held-out test split.
   */
  def train(): Unit = {
    val user = "C:\\Users\\Administrator\\Desktop\\用户流失预测\\user_tag_new_1.csv"
    // First column is the user id, last is the label; the rest are features.
    val arr = Array("gitee_id", /*"prs", "issues", "comments", "stars", "watchs", "forks",*/ "pr_merged_percent", "ci_fail_percent", "pr_open_days_avg",
      "issue_closed_percent", "issue_rejected_percent", "issue_open_days_avg", "sig_cluster", "last_contribute_to_now", "tag")
    val userDS = spark.read
      .option("header", value = true)
      .option("inferSchema", value = true)
      .csv(user)
      .select(arr.map(col): _*)

    // Split into training (70%) and test (30%) sets with a fixed seed
    // so runs are reproducible.
    val Array(trainingData, testData) = userDS.randomSplit(Array(0.7, 0.3), 5000)

    // Index the raw "tag" column into the numeric "label" column Spark ML expects.
    val labelIndex = new StringIndexer().setInputCol("tag").setOutputCol("label").fit(trainingData)
    // Assemble feature columns into a single vector — everything except
    // gitee_id, last_contribute_to_now and tag.
    // NOTE(review): last_contribute_to_now is loaded but excluded from the
    // feature vector here — confirm that exclusion is intentional.
    val assembler = new VectorAssembler().setInputCols(arr.drop(1).dropRight(2)).setOutputCol("features")
    // Random-forest classifier; fixed seed for reproducible tree sampling.
    val rf = new RandomForestClassifier().setImpurity("gini").setMaxDepth(3).setNumTrees(20).setFeatureSubsetStrategy("auto").setSeed(5043)
    // Chain label indexing, feature assembly and the classifier into one pipeline.
    val pipeline = new Pipeline().setStages(Array[PipelineStage](labelIndex, assembler, rf))
    // Fit on the training split and persist the entire fitted pipeline.
    val model = pipeline.fit(trainingData)
    model.write.overwrite().save(modelPath)

    // Score the held-out test split.
    val dataset = model.transform(testData)
    dataset.show()

    // Dump the learned trees for debugging (stage 2 is the classifier).
    val rfModel = model.stages(2).asInstanceOf[RandomForestClassificationModel]
    println(s"debug: ${rfModel.toDebugString}")

    // Multiclass metric; alternatives: "f1" (default), "weightedPrecision", "weightedRecall".
    val metric = "accuracy"
    val evaluator = new MulticlassClassificationEvaluator().setLabelCol("label").setPredictionCol("prediction")
      .setMetricName(metric)
    println(s"$metric: ${evaluator.evaluate(dataset)}")

    // Area under the ROC curve; alternative metric: "areaUnderPR".
    val evaluator2 = new BinaryClassificationEvaluator().setMetricName("areaUnderROC")
      .setRawPredictionCol("rawPrediction").setLabelCol("label")
    val auc = evaluator2.evaluate(dataset)
    println(s"Area Under ROC: $auc")
  }

  /**
   * Loads the persisted pipeline from [[modelPath]], scores a new CSV of
   * users (same columns as training, minus the "tag" label), and writes the
   * predictions back out as CSV with every column cast to string.
   */
  def predict(): Unit = {
    val user = "C:\\Users\\Administrator\\Desktop\\用户流失预测\\opengauss\\test.csv"
    // Same column layout as training, without the label.
    val arr = Array("gitee_id", /*"prs", "issues", "comments", "stars", "watchs", "forks", */ "pr_merged_percent", "ci_fail_percent", "pr_open_days_avg",
      "issue_closed_percent", "issue_rejected_percent", "issue_open_days_avg", "sig_cluster" , "last_contribute_to_now")
    val userDS = spark.read
      .option("header", value = true)
      .option("inferSchema", value = true)
      .csv(user)
      .select(arr.map(col): _*)

    val plModel = PipelineModel.load(modelPath)
    val preDf = plModel.transform(userDS)

    // Cast every output column to string so the frame (including the
    // pipeline's vector columns) can be written as CSV.
    val fields = preDf.schema.fieldNames.map(col(_).cast("string"))
    preDf.select(fields: _*).write.mode("overwrite")
      .option("header", value = true).csv("C:\\Users\\Administrator\\Desktop\\用户流失预测\\user-rf.csv")
  }
}
