package com.timeriver.cases.power_prediction.v3

import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.regression.RandomForestRegressor
import org.apache.spark.ml.tuning._
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DateType, DoubleType, IntegerType}

/**
  * 使用参数网格自动调参
  */
/**
  * Power-consumption forecasting for the Zhenjiang high-tech zone using a
  * random-forest regressor whose hyper-parameters (numTrees, maxDepth) are
  * tuned automatically via a parameter grid + train/validation split.
  *
  * Usage: an optional first CLI argument overrides the default CSV path,
  * so `main(Array())` behaves exactly as before.
  */
object PowerRegressionPredict {

  /** Default input file; kept for backward compatibility when no arg is given. */
  private val DefaultDataPath =
    "D:\\workspace\\gitee_space\\spark-ml-machine-learning\\data\\zhenjiang_power.csv"

  def main(args: Array[String]): Unit = {
    val session: SparkSession = SparkSession.builder()
      .appName("基于随机森林回归模型进行镇江高新区电力预测")
      .master("local[*]")
      .getOrCreate()

    // Allow the data path to be supplied on the command line; fall back to the
    // original hard-coded location so existing invocations keep working.
    val dataPath: String = args.headOption.getOrElse(DefaultDataPath)

    val rawData: DataFrame = session.read.format("csv")
      .option("header", true)
      .load(dataPath)

    // CSV columns arrive as strings; cast to their real types up front.
    val data: DataFrame = rawData.withColumn("record_date", col("record_date").cast(DateType))
      .withColumn("power_consumption", col("power_consumption").cast(DoubleType))
      .withColumn("user_id", col("user_id").cast(IntegerType))

    val num: Long = data.select("user_id").distinct().count()
    println(s"企业数量统计：$num")

    /** Total daily power consumption for the whole district. */
    val daily_power: DataFrame = data.groupBy("record_date").sum("power_consumption").sort("record_date")

    /** Calendar features: month / day of week / week of year / day of month. */
    val newData: DataFrame = daily_power.withColumn("month", month(column("record_date")))
      .withColumn("dayOfWeek", dayofweek(column("record_date")))
      .withColumn("weekOfYear", weekofyear(column("record_date")))
      .withColumn("dayOfMonth", dayofmonth(column("record_date")))
      .withColumnRenamed("sum(power_consumption)", "label")

    /**
      * Weekend flag + ten-day period of the month.
      * NOTE: Spark's `dayofweek` returns 1 = Sunday ... 7 = Saturday, so the
      * weekend is dayOfWeek = 1 (Sunday) or 7 (Saturday). The previous
      * condition (6 or 7) wrongly flagged Friday and missed Sunday.
      * (Column name "weekdayOrNot" is kept for compatibility; it actually
      * means "is weekend".)
      */
    val newData_1: DataFrame = newData.selectExpr("*",
      "case when dayOfWeek=1 then 1 when dayOfWeek=7 then 1 else 0 end as weekdayOrNot",
      "case when dayOfMonth between 1 and 10 then 1 when dayOfMonth between 11 and 20 then 2 else 3 end as tenDays")

    // Assemble all engineered features into a single vector column.
    val trainData: DataFrame = new VectorAssembler()
      .setInputCols(Array("month", "dayOfWeek", "weekOfYear", "weekdayOrNot", "dayOfMonth", "tenDays"))
      .setOutputCol("features")
      .transform(newData_1)

    val count: Long = trainData.count()
    println(s"训练数据量：$count")

    /** Time-based split: train on 2015-01 .. 2016-07, test on 2016-08. */
    val train: Dataset[Row] = trainData.where("record_date >= '2015-01-01' and record_date <= '2016-07-31'")
    val test: Dataset[Row] = trainData.where("record_date >= '2016-08-01' and record_date <= '2016-08-31'")

    /** Random-forest regression estimator (fixed seed for reproducibility). */
    val regressor: RandomForestRegressor = new RandomForestRegressor()
      .setLabelCol("label")
      .setFeaturesCol("features")
      .setSeed(123)

    /** Hyper-parameter grid: 4 x 4 = 16 candidate models. */
    val paramMaps: Array[ParamMap] = new ParamGridBuilder()
      .addGrid(regressor.numTrees, Array(5, 10, 20, 40))
      .addGrid(regressor.maxDepth, Array(3, 5, 7, 9))
      .build()

    // TrainValidationSplit evaluates each candidate once on a held-out slice
    // (default trainRatio = 0.75), training up to 2 candidates in parallel.
    val validator: TrainValidationSplit = new TrainValidationSplit()
      .setEstimator(regressor)
      .setEvaluator(new RegressionEvaluator())
      .setEstimatorParamMaps(paramMaps)
      .setParallelism(2)

    /** Fit: picks the best parameter combination and refits on all of `train`. */
    val model: TrainValidationSplitModel = validator.fit(train)

    /** Predict on the August 2016 hold-out set. */
    val res: DataFrame = model.transform(test)
    res.show(5, false)

    // Score the hold-out predictions with R^2 (coefficient of determination).
    val evaluator: RegressionEvaluator = new RegressionEvaluator()
      .setPredictionCol("prediction")
      .setLabelCol("label")
      .setMetricName("r2")

    val r2: Double = evaluator.evaluate(res)
    println(s"r2的值为：$r2")

    session.stop()
  }
}
