package com.spark.mooc.ch8_sparkMLlib
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel, LinearRegressionTrainingSummary}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
/**
 * @description: COVID-19 big-data analysis and prediction case study
 * @time: 2021/6/20 14:38
 * @author: lhy
 */
object LR {
    // Silence Spark's verbose INFO logging before the session is created.
    Logger.getLogger("org").setLevel(Level.WARN)

    /**
     * COVID-19 confirmed-case prediction with linear regression.
     *
     * Reads training rows of the form "suspected,death,recovered,label",
     * trains an elastic-net linear regression on an 80/20 random split,
     * reports the iteration count and RMSE, then predicts case counts for
     * five cities from per-city "suspected,death,recovered" CSV files.
     *
     * @param args unused command-line arguments
     */
    def main(args: Array[String]): Unit = {
        val spark: SparkSession = SparkSession.builder()
            .appName("LR")
            .master("local[*]")
            .getOrCreate()

        val featureCols: Array[String] = Array("suspected", "death", "recovered")
        // One assembler serves both training and prediction data: the
        // feature columns are identical, so there is no reason to rebuild
        // it per city as a loop-local value.
        val assembler: VectorAssembler = new VectorAssembler()
            .setInputCols(featureCols)
            .setOutputCol("features")

        /**
         * Prepare training data: parse "suspected,death,recovered,label" lines.
         */
        val trainData: RDD[(Double, Double, Double, Double)] =
            spark.sparkContext.textFile("input/COVID-19/train/000000_0").map { row =>
                val split = row.split(",")
                (split(0).toDouble, split(1).toDouble, split(2).toDouble, split(3).toDouble)
            }
        val trainDF: DataFrame = spark.createDataFrame(trainData)
            .toDF(featureCols(0), featureCols(1), featureCols(2), "label")
        // Vectorize the feature columns into a single "features" column.
        val vectorDF: DataFrame = assembler.transform(trainDF)
        // 80/20 train/test split.
        val Array(trainSet, testSet) = vectorDF.randomSplit(Array(0.8, 0.2))

        /**
         * Create and train the model.
         */
        val linearRegression: LinearRegression = new LinearRegression()
            .setFeaturesCol("features")
            .setLabelCol("label")
            .setFitIntercept(true)
            .setMaxIter(60)
            .setRegParam(0.3)
            .setElasticNetParam(0.8)
        val lrModel: LinearRegressionModel = linearRegression.fit(trainSet)

        /**
         * Evaluate the model from its training summary.
         */
        val trainingSummary: LinearRegressionTrainingSummary = lrModel.summary
        // Number of iterations actually run
        println(s"numIterations: ${trainingSummary.totalIterations}")
        // Root mean squared error
        println(s"RMSE: ${trainingSummary.rootMeanSquaredError}")

        /**
         * Test on the held-out split.
         * NOTE: show() prints on the driver. The previous
         * foreach(println(_)) ran println on the executors, which produces
         * no driver-side output outside local mode.
         */
        val predictions: DataFrame = lrModel.transform(testSet)
        predictions
            .selectExpr("features", "label", "round(prediction,1) as prediction")
            .show(false)

        /**
         * Predict case counts for each city from
         * "input/COVID-19/predict/<city>" CSV files.
         */
        val cities = Seq("beijing", "hebei", "hubei", "hunan", "shanghai")
        cities.foreach { city =>
            val cityData: RDD[(Double, Double, Double)] =
                spark.sparkContext.textFile("input/COVID-19/predict/" + city).map { row =>
                    val split = row.split(",")
                    (split(0).toDouble, split(1).toDouble, split(2).toDouble)
                }
            val cityDF: DataFrame = spark.createDataFrame(cityData)
                .toDF(featureCols(0), featureCols(1), featureCols(2))
            val cityVectorDF: DataFrame = assembler.transform(cityDF)
            val result: DataFrame = lrModel.transform(cityVectorDF)
                .selectExpr("features", "round(prediction,1) as prediction")
            println("=============" + "predict " + city + "=============")
            result.show(false)
        }

        // Release the session's resources instead of leaking it at JVM exit.
        spark.stop()
    }
}
