package userchurn

import java.time.{LocalDateTime, ZoneOffset}

import org.apache.spark.ml.classification.{RandomForestClassificationModel, RandomForestClassifier}
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, MulticlassClassificationEvaluator}
import org.apache.spark.ml.{Pipeline, PipelineStage}
import org.apache.spark.ml.feature.{StringIndexer, VectorAssembler}
import org.apache.spark.ml.regression.RandomForestRegressionModel
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}
import org.apache.spark.mllib.evaluation.RegressionMetrics
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

object DataCollectionAndTag {

  // Needed on Windows so Hadoop can locate winutils.exe under %HADOOP_HOME%\bin.
  System.setProperty("hadoop.home.dir", "C:\\hadoop-2.8.1")

  // Single local Spark session shared by every step below.
  val spark: SparkSession = SparkSession.builder.appName("KMeans").master("local").getOrCreate

  // Workspace root for all input/output CSVs ("用户流失预测" = "user churn prediction").
  private val basePath = "C:\\Users\\Administrator\\Desktop\\用户流失预测"

  def main(args: Array[String]): Unit = {
    // Intentionally empty: the private pipeline steps below are invoked ad hoc
    // during analysis. Apparent order of use:
    //   sigRepoInfo() -> sigClustering() -> userSigInfo() -> userInfo()
  }

  /** Clusters SIG (special interest group) activity into 3 groups with K-Means.
   *
   *  Reads the per-SIG aggregates produced by [[sigRepoInfo]], assembles the numeric
   *  activity columns into a feature vector, fits K-Means (k = 3, max 100 iterations,
   *  fixed seed for reproducibility) and writes each SIG with its cluster id to CSV.
   */
  private def sigClustering(): Unit = {
    // Load per-SIG metrics; nulls become "0" before the numeric casts below.
    val sigDS = spark.read.option("header", value = true).csv(s"$basePath\\sig\\sigs.csv").na.fill("0")
      .withColumn("prs", col("prs").cast("double"))
      .withColumn("issues", col("issues").cast("double"))
      .withColumn("pr_comments", col("pr_comments").cast("double"))
      .withColumn("issue_comments", col("issue_comments").cast("double"))
      .withColumn("stars", col("stars").cast("double"))
      .withColumn("watchs", col("watchs").cast("double"))
      .withColumn("forks", col("forks").cast("double"))

    // Encode sig_name as a numeric "label" (kept only for inspection; dropped before the final write).
    val labelIndex = new StringIndexer().setInputCol("sig_name").setOutputCol("label").fit(sigDS)
    // Assemble the activity metrics into a single feature vector.
    // NOTE(review): "watchs" is cast above but excluded here — confirm whether it
    // was meant to be one of the clustering features.
    val assembler = new VectorAssembler()
      .setInputCols("prs,issues,pr_comments,issue_comments,stars,forks".split(","))
      .setOutputCol("features")
    val pipeline = new Pipeline().setStages(Array[PipelineStage](labelIndex, assembler))
    val dataset = pipeline.fit(sigDS).transform(sigDS)

    // Configure column names on the ESTIMATOR before fitting. The original set
    // them on the already-fitted model, which cannot influence the fit and only
    // appeared to work because "features"/"prediction" are the defaults.
    val kMeans: KMeans = new KMeans()
      .setK(3)
      .setMaxIter(100)
      .setSeed(100L)
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
    val kMeansModel = kMeans.fit(dataset)

    // Within-set sum of squared errors.
    // NOTE(review): computeCost is deprecated in Spark 3 — prefer ClusteringEvaluator.
    val WSSSE: Double = kMeansModel.computeCost(dataset)
    println("误差平方和 " + WSSSE)

    // Print the cluster centers for manual inspection.
    val centers = kMeansModel.clusterCenters
    print("聚类中心: ")
    centers.foreach(print)

    // Persist each SIG with its predicted cluster id (vector/label columns dropped
    // because they are not CSV-serializable / not needed downstream).
    val transform = kMeansModel.transform(dataset)
    transform.show()
    transform.drop("label", "features").write.mode("overwrite").option("header", value = true)
      .csv(s"$basePath\\sig-clustering.csv")
  }

  /** Aggregates repository activity per SIG.
   *
   *  Joins the SIG→repo mapping with per-repo metrics (keyed on the full gitee URL)
   *  and sums every metric per SIG, producing the sigs.csv consumed by [[sigClustering]].
   */
  private def sigRepoInfo(): Unit = {
    // SIG -> repo mapping; repo_name is host-relative, so prefix the gitee host
    // to match repo.csv's gitee_repo column.
    val sigUserDS = spark.read.option("header", value = true).csv(s"$basePath\\sig\\sig-repo.csv")
      .select(col("sig_name"), concat_ws("", lit("https://gitee.com/"), col("repo_name")).as("gitee_repo"))
    sigUserDS.show()

    // Per-repo activity metrics, cast to numeric for the aggregation below.
    val repoDS = spark.read.option("header", value = true).csv(s"$basePath\\repo\\repo.csv")
      .select(col("gitee_repo"), col("prs").cast("double"), col("issues").cast("double"),
        col("pr_comments").cast("double"), col("issue_comments").cast("double"),
        col("stars").cast("double"), col("watchs").cast("double"), col("forks").cast("double"))

    // Left join keeps SIGs whose repos have no recorded metrics (their sums become null).
    val sigRepoInfoDS = sigUserDS.join(repoDS, sigUserDS("gitee_repo") === repoDS("gitee_repo"), "left")
    val sigs = sigRepoInfoDS.groupBy("sig_name")
      .sum("prs", "issues", "pr_comments", "issue_comments", "stars", "watchs", "forks")
    // coalesce(1): a single output part-file for easy manual inspection.
    sigs.coalesce(1).write.mode("overwrite").option("header", value = true).csv(s"$basePath\\sigs.csv")
  }

  /** Derives one SIG-cluster id per user.
   *
   *  Swaps raw cluster ids 0 and 1, then takes max(prediction) per user —
   *  presumably so a larger id corresponds to a more active SIG cluster.
   */
  private def userSigInfo(): Unit = {
    // Swap cluster ids 0 <-> 1; id 2 (and anything else) passes through unchanged.
    // NOTE(review): this assumes cluster 1 ranked below cluster 0 in this particular
    // K-Means run — verify against the printed cluster centers before reuse.
    val swapClusterIds = udf((str: String) => {
      if (str == "0") "1"
      else if (str == "1") "0"
      else str
    })
    val userRepoSigDS = spark.read.option("header", value = true).csv(s"$basePath\\user_repo_sig_cluster.csv")
      .select(col("gitee_id"), swapClusterIds(col("prediction")).as("prediction").cast("int"))
    // Keep the highest (post-swap) cluster id per user.
    userRepoSigDS.groupBy("gitee_id").max("prediction")
      .coalesce(1).write.mode("overwrite").option("header", value = true)
      .csv(s"$basePath\\user_sigs_cluster.csv")
  }

  /** Computes per-user derived churn features.
   *
   *  Joins the user table with PR/issue open-day aggregates and derives ratio,
   *  duration and frequency features, then writes user_res.csv. The join with
   *  the per-user SIG clusters is currently disabled (kept as a comment).
   */
  private def userInfo(): Unit = {
    val userDS = spark.read.option("header", value = true).csv(s"$basePath\\opengauss\\user_new.csv")
    val prOpenDaysDS = spark.read.option("header", value = true).csv(s"$basePath\\opengauss\\pr_open_days.csv")
    val issOpenDaysDS = spark.read.option("header", value = true).csv(s"$basePath\\opengauss\\issue_open_days.csv")

    // Kept from the original, currently disabled: per-user SIG cluster lookup.
    //    val userSigClusterDS = spark.read.option("header", value = true).csv(s"$basePath\\user_sigs_cluster.csv")
    //      .select(col("gitee_id").as("gitee_id_1"), col("sig_cluster"))

    // Left joins keep users without any PRs/issues; their metrics are filled with "0".
    val withPrDays = userDS.join(prOpenDaysDS, userDS("gitee_id") === prOpenDaysDS("user_login_pr"), "left")
    val joined = withPrDays.join(issOpenDaysDS, withPrDays("gitee_id") === issOpenDaysDS("user_login_issue"), "left").na.fill("0")

    // Sum of all contribution counters (columns are still strings at this point).
    val sumUDF = udf((strs: Seq[String]) => strs.map(_.toInt).sum)
    // Safe ratio: 0.0 when the denominator is "0". na.fill above guarantees nulls
    // became the literal "0", so only that value needs guarding.
    val ratio = udf((num: String, den: String) => if (den == "0") 0.0 else num.toDouble / den.toDouble)
    // Difference of two epoch-millisecond timestamps in whole days (integer division truncates).
    val daysBetween = udf((later: String, earlier: String) => (later.trim.toLong - earlier.trim.toLong) / 1000 / 60 / 60 / 24)
    // "Now" in epoch millis, fixed once per run; source timestamps appear to be UTC+8
    // — TODO confirm against the data producer.
    val nowMilliSecond = LocalDateTime.now().toInstant(ZoneOffset.of("+8")).toEpochMilli

    val userResDS = joined
      .withColumn("pr_merged_percent", ratio(col("pr_merged"), col("prs")))
      .withColumn("ci_fail_percent", ratio(col("ci_fail"), col("prs")))
      .withColumn("issue_closed_percent", ratio(col("issue_closed"), col("issues")))
      .withColumn("issue_rejected_percent", ratio(col("issue_rejected"), col("issues")))
      .withColumn("contributes", sumUDF(array(col("prs"), col("issues"), col("pr_comments"), col("issue_comments"),
        col("stars"), col("watchs"), col("forks"))))
      .withColumn("contribute_days", daysBetween(col("last_contribute_time"), col("first_contribute_time")))
      .withColumn("last_contribute_to_now", daysBetween(lit(nowMilliSecond), col("last_contribute_time")))
      .withColumn("contribute_freq", ratio(col("contributes"), col("contribute_days")))
      .withColumn("pr_open_days_avg", ratio(col("pr_open_days"), col("prs")))
      .withColumn("issue_open_days_avg", ratio(col("issue_open_days"), col("issues")))

    // SIG-cluster join disabled in the original; re-enable alongside userSigClusterDS above.
    userResDS /*.join(userSigClusterDS, userResDS("gitee_id") === userSigClusterDS("gitee_id_1"), "left")
      .na.fill(Map("sig_cluster" -> "2"))*/
      .coalesce(1).write.mode("overwrite").option("header", value = true)
      .csv(s"$basePath\\opengauss\\user_res.csv")
  }

}
