package com.hliushi.spark.machine_learn

import org.apache.spark.ml.feature.{OneHotEncoderEstimator, QuantileDiscretizer, StringIndexer, VectorAssembler}
import org.apache.spark.ml.regression.RandomForestRegressor
import org.apache.spark.sql.types.{DoubleType, LongType}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Description: house price prediction (Kaggle "House Prices" training data).
 *
 * author: Hliushi
 * date: 2021/6/17 14:28
 */
object ForecastHousePrice {

  /**
   * Entry point. Runs the full exploration + training pipeline:
   *
   *   1. Read the training CSV into a DataFrame.
   *   2. Inspect the label column (`SalePrice`) and bucket it into a
   *      quantile histogram.
   *   3. Encode string-enum feature columns: StringIndexer -> one-hot.
   *   4. Assemble the one-hot columns into a single `features` vector.
   *   5. Train a RandomForestRegressor and show predicted vs. actual prices.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // 1. Read the data with a local Spark session (6 worker threads).
    val spark = SparkSession.builder()
      .appName("forecast_house_price")
      .master("local[6]")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    val source = spark.read
      .option("header", value = true)
      .option("delimiter", value = ",")
      .option("inferSchema", value = true)
      .csv("dataset/house_prices_train.csv")

    // NOTE(review): this temp view is registered but never queried below;
    // kept to preserve the original session state.
    source.createOrReplaceTempView("house_price")

    // 2. Inspect the label column. describe() computes count, mean, stddev,
    // min and max for the named numeric/string columns (all columns if none
    // are given).
    source.describe("SalePrice").show()
    // Sample output: count=1460, mean=180921.19, stddev=79442.50,
    //                min=34900,  max=755000

    // Build a histogram of the label: split SalePrice into 50 quantile
    // buckets so each bucket holds roughly the same number of rows.
    val salePrice = source.select($"Id", $"SalePrice" cast (LongType))

    val discretizer = new QuantileDiscretizer()
      .setInputCol("SalePrice")
      .setOutputCol("Bin")
      .setNumBuckets(50)

    discretizer.fit(salePrice)
      .transform(salePrice)
      .groupBy($"Bin")
      .agg(avg($"SalePrice") as "avg_price", count($"Bin") as "count")
      .createOrReplaceTempView("hist_price")

    spark.sql("select * from hist_price order by Bin").show()
    // Sample output: Bin=0.0 avg_price=59649.53 count=28, Bin=1.0 ... etc.

    // Derive a combined feature: total square footage across basement and
    // the first/second floors. col(...) is required because "1stFlrSF" and
    // "2ndFlrSF" start with a digit and cannot use the $ interpolator.
    source.select($"TotalBsmtSF" + col("1stFlrSF") + col("2ndFlrSF") as "TotalSF")
      .show()

    // Columns holding string enumerations that need feature encoding.
    val cols = Array(
      "BsmtFinType1", "MasVnrType", "Foundation",
      "HouseStyle", "Functional", "BsmtExposure",
      "GarageFinish", "Street", "ExterQual",
      "PavedDrive", "ExterCond", "KitchenQual",
      "HeatingQC", "BsmtQual", "FireplaceQu",
      "GarageQual", "PoolQC"
    )

    // StringIndexer maps each string category to a numeric index
    // ("<name>_indexer"). Thread the DataFrame through a fold instead of
    // mutating a nullable var in a loop (fix: var/null anti-pattern).
    val indexedDF: DataFrame = cols.foldLeft(source) { (df, c) =>
      new StringIndexer()
        .setInputCol(c)
        .setOutputCol(s"${c}_indexer")
        .fit(df)
        .transform(df)
    }

    // Show only the newly produced index columns.
    val indexedCols = cols.map((c: String) => col(s"${c}_indexer"))
    indexedDF.select(indexedCols: _*).show()
    // Sample output: each "<name>_indexer" column holds 0.0, 1.0, 2.0, ...

    // One-hot encode the index columns into sparse vectors
    // ("<name>_onehot"). NOTE: OneHotEncoderEstimator requires
    // Spark >= 2.3.0 (this project's pom.xml was bumped accordingly).
    val oneHotEncoderEstimator = new OneHotEncoderEstimator()
      .setInputCols(cols.map((c: String) => s"${c}_indexer"))
      .setOutputCols(cols.map((c: String) => s"${c}_onehot"))

    // fit() learns the category sizes from the data; transform() applies
    // the learned encoding to produce the output columns.
    val oneHotDF = oneHotEncoderEstimator
      .fit(indexedDF).transform(indexedDF)

    val oneHotCols = cols.map((c: String) => col(s"${c}_onehot"))
    oneHotDF.select(oneHotCols: _*).show()
    // Sample output: sparse vectors such as (6,[1],[1.0]), (4,[0],[1.0]), ...

    // Most ML algorithms accept exactly one input column, so assemble all
    // one-hot vectors into a single "features" vector column.
    val vectorAssembler = new VectorAssembler()
      .setInputCols(cols.map((c: String) => s"${c}_onehot"))
      .setOutputCol("features")

    val vectorDF = vectorAssembler.transform(oneHotDF)

    vectorDF.select("features").show()
    // Sample output: (69,[1,7,10,16,22...]), (69,[2,6,11,15,22...]), ...

    // Train a random-forest regressor on the assembled features; the label
    // must be Double, hence the cast below.
    val regressor = new RandomForestRegressor()
      .setMaxDepth(5)
      .setImpurity("variance")
      .setFeaturesCol("features")
      .setLabelCol("SalePrice")
      .setPredictionCol("prediction")

    // fit() trains the model from the data; transform() then predicts.
    regressor.fit(vectorDF.select($"features", $"SalePrice" cast DoubleType))
      .transform(vectorDF)
      .select($"SalePrice", $"prediction")
      .show()
    // Final result: left column is the true SalePrice, right column is the
    // model's prediction, e.g. 208500 -> 197380.77, 181500 -> 191125.17, ...

    // Release local cluster resources (fix: the session was never stopped).
    spark.stop()
  }

  /**
   * Ordinal feature encoding for quality-rating strings.
   *
   * Some features have an obvious ordering (like height split into
   * short/medium/tall); mapping the text ratings to ordered integers lets
   * the learner exploit that ordering. Unknown / missing ratings map to 0.
   *
   * NOTE(review): currently unused by `main` — kept as a public helper.
   *
   * @param q quality rating string ("Ex", "Gd", "TA", "Fa", or anything else)
   * @return ordinal level 1-4, or 0 for any unrecognized value
   */
  def to_level(q: String): Int = {
    q match {
      case "Ex" => 1
      case "Gd" => 2
      case "TA" => 3
      case "Fa" => 4
      case _ => 0
    }
  }

  /**
   * Feature-encoding background.
   *
   * Principle:
   *   1. A -> 1, B -> 2: the encoding mapping must be recorded.
   *   2. Most algorithms cannot compute on strings, but humans cannot read
   *      raw numbers either — after computing, map the numbers back.
   *
   * Steps used above:
   *   1. Identify the string-enum columns.
   *   2. Convert them to numeric indices (StringIndexer).
   *   3. One-hot encode the indices.
   *
   * One-hot turns one column into a sparse vector (one slot per category),
   * which suits mathematical computation better than a single ordinal.
   */
}