package hamster.csustef.warehouse

import java.util.Properties

import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionModel, LinearRegressionWithSGD}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object LinearRegressionApplication {

  // Every valid input record must split into exactly 32 comma-separated fields.
  private val ExpectedFieldCount = 32

  /**
   * Builds a LabeledPoint from one 32-field record.
   *
   * Sample record layout (0-based field index), taken from the data:
   *   1~5:   99:A616,20220512,131,1469.99,2022-05-12,
   *   6~10:  2022,2,5,12,4,
   *   11~15: 2,0,0,0,1,
   *   16~20: 0,19.97,21.0,19.0,92.8,
   *   21~25: 97.0,88.0,0.83,1.0,0.0,
   *   26~30: 76.88,83.0,69.0,99,A616,
   *   31~32: 99,行健轩2栋A区
   *
   * Fields used: 3 = consume (label), 5 = year, 7 = month, 8 = day,
   * 9 = day_in_week, 10 = week_in_month, 11..15 = calendar flags,
   * 16 = temperature, 19 = humidity, 22 = wind_level, 25 = air_condition,
   * 29 = room id (may carry an A/B/C zone prefix), 30 = building id.
   *
   * NOTE(review): the label (consume) is also packed into the feature vector
   * (feature index 12). For training this is label leakage and makes the fit
   * trivial; it is kept because the downstream Row extraction relies on these
   * exact feature indices — confirm whether this is intentional.
   */
  private def toLabeledPoint(parts: Array[String]): LabeledPoint = {
    val consume: Double = parts(3).toDouble
    val year: Double = parts(5).toDouble
    val month: Double = parts(7).toDouble
    val day: Double = parts(8).toDouble
    val dayInWeek: Double = parts(9).toDouble
    val weekInMonth: Double = parts(10).toDouble
    val isFirstDayInMonth: Double = parts(11).toDouble
    val isLastDayInMonth: Double = parts(12).toDouble
    val isHoliday: Double = parts(13).toDouble
    val isWorkday: Double = parts(14).toDouble
    val isWeekend: Double = parts(15).toDouble
    val temperature: Double = parts(16).toDouble
    val humidity: Double = parts(19).toDouble
    val windLevel: Double = parts(22).toDouble
    val airCondition: Double = parts(25).toDouble
    // Strip the building-zone letter (A/B/C) so the room id parses as a number.
    val rawRoom = parts(29)
    val roomId: String =
      if (rawRoom.startsWith("A") || rawRoom.startsWith("B") || rawRoom.startsWith("C")) rawRoom.substring(1)
      else rawRoom
    val buildingId: Double = parts(30).toDouble

    LabeledPoint(consume, Vectors.dense(
      year, month, day,
      dayInWeek, weekInMonth,
      isFirstDayInMonth, isLastDayInMonth, isHoliday, isWorkday, isWeekend,
      buildingId, roomId.toDouble,
      consume, temperature, humidity, temperature * humidity, windLevel, airCondition))
  }

  /**
   * Loads and parses every well-formed (32-field, positive-label) record
   * under `path` into a cached RDD of LabeledPoints.
   *
   * This replaces the parsing pipeline that was previously duplicated
   * verbatim for the training and the prediction data sets.
   */
  private def loadLabeledPoints(sc: SparkContext, path: String): RDD[LabeledPoint] =
    sc.textFile(path)
      // Re-decode bytes as UTF-8 (a no-op when the JVM default charset is
      // already UTF-8; presumably guards against non-UTF-8 platforms — verify).
      .map((line: String) => new String(line.getBytes(), "UTF-8"))
      .map(_.split(','))
      .filter(_.length == ExpectedFieldCount)
      .map(toLabeledPoint)
      .filter(_.label > 0)
      .cache()

  /**
   * Entry point: trains a linear regression model on dorm power-consumption +
   * weather features, predicts consumption for the candidate records, and
   * writes the (prediction, features) table to MySQL via JDBC.
   */
  def main(arg: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    sparkConf.setMaster("local[*]")
      .set("spark.sql.crossJoin.enabled", "true")
      .set("spark.debug.maxToStringFields", "100")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.locality.wait", "10")
      .set("spark.storage.memoryFraction", "0.4")
      .set("spark.shuffle.consolidateFiles", "true")
      .set("spark.reducer.maxSizeInFlight", "96") // reduce-task fetch buffer, default 48m
      .set("spark.shuffle.file.buffer", "64") // map-task disk-write buffer, default 32k
      .set("spark.shuffle.memoryFraction", "0.4") // memory fraction for reduce-side aggregation, default 0.2; spills beyond it
      .setAppName("LinearRegression")
    val sc = new SparkContext(sparkConf)

    val trainingDataPath = "hdfs://master:8020/user/hive/warehouse/dws.db/dws_csustef_consume_weather_day_dorm/*"
    val trains: RDD[LabeledPoint] = loadLabeledPoints(sc, trainingDataPath)

    val predictDataPath = "hdfs://master:8020/user/hive/warehouse/tmp.db/tmp_dws_csustef_consume_weather_day_dorm/*"
    val predictionFeature: RDD[LabeledPoint] = loadLabeledPoints(sc, predictDataPath)

    // Step size is critical to SGD convergence; Spark's default is 1.0.
    val stepSize = 0.00000000042

    // NOTE(review): LinearRegressionWithSGD is deprecated since Spark 2.0;
    // prefer org.apache.spark.ml.regression.LinearRegression when upgrading.
    val algorithm = new LinearRegressionWithSGD()
    algorithm.optimizer.setNumIterations(2).setStepSize(stepSize)

    val model: LinearRegressionModel = algorithm.run(trains)

    val prediction: RDD[Double] = model.predict(predictionFeature.map(_.features))

    // Pair each predicted consume with the feature vector it was computed from.
    // (Renamed from `predictionAndLabel`: the right side is features, not labels.)
    val predictionWithFeatures: RDD[(Double, linalg.Vector)] =
      prediction.zip(predictionFeature.map(_.features))

    val sparkSession: SparkSession = SparkSession.builder().master("local[*]")
      .appName("SparkExecuteSqlFile")
      .getOrCreate()

    // Step 1: turn the (prediction, features) pairs into Row objects whose
    // columns line up with the schema built below. The indices select the
    // corresponding slots of the dense feature vector built in toLabeledPoint.
    val row: RDD[Row] = predictionWithFeatures.map { case (predicted, features) =>
      Row(
        features(0).toInt, // year
        features(1).toInt, // month
        features(2).toInt, // day
        predicted, // predict_consume
        features(10).toInt, // building_id
        features(11).toInt, // room_id
        features(12), // consume (actual, carried through the feature vector)
        features(13), // temperature
        features(14), // humidity
        features(16), // wind_level
        features(17) // air_condition
      )
    }

    // Step 2: declare the schema. StructField takes (name, dataType, nullable).
    val struct: StructType = StructType(
      StructField("year", IntegerType, false) ::
        StructField("month", IntegerType, false) ::
        StructField("day", IntegerType, false) ::
        StructField("predict_consume", DoubleType, false) ::
        StructField("building_id", IntegerType, false) ::
        StructField("room_id", IntegerType, false) ::
        StructField("consume", DoubleType, false) ::
        StructField("temperature", DoubleType, false) ::
        StructField("humidity", DoubleType, false) ::
        StructField("wind_level", DoubleType, false) ::
        StructField("air_condition", DoubleType, false) ::
        Nil
    )

    // Step 3: combine rows and schema into a DataFrame.
    val df: DataFrame = sparkSession.createDataFrame(row, struct)

    // Register the DataFrame as a temporary view and select everything back.
    df.createTempView("df_view")
    val result: DataFrame = sparkSession.sql("select * from df_view")

    // MySQL sink configuration.
    // NOTE(review): credentials are hard-coded in source — move them to
    // configuration or a secrets store before this leaves development.
    val url = "jdbc:mysql://localhost:3306/flink"
    val tableName = "df_view"
    val properties = new Properties()
    properties.setProperty("user", "root")
    properties.setProperty("password", "OBUIVYyhm20010921.")

    // Overwrite the target table with the prediction results.
    result.write.mode(SaveMode.Overwrite).jdbc(url, tableName, properties)
    sc.stop()
  }
}
