package mlib

import java.time.{LocalDate, LocalDateTime, ZoneOffset}

import com.htiiot.store.model.DeviceNumber
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.Logger
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import tools.ZookeeperClient

import scala.collection.mutable.ArrayBuffer

/**
  * Metric degradation analysis (linear regression).
  *
  * @author :chensi
  */
object DegradationPredict {
  private def log = Logger.getLogger(DegradationPredict.getClass)

  /**
    * Entry point.
    *
    * Expected arguments:
    *  - args(0): HDFS directory containing degradation data files whose names
    *    start with a date, e.g. "2019-06-01..." (year-month-day).
    *  - args(1): ZooKeeper connection string used to look up the model output path.
    *
    * Each input line is tab-separated: column 0 is a hex-encoded device number,
    * column 1 is the metric value used as the regression label.
    */
  def main(args: Array[String]): Unit = {
    System.setProperty("HADOOP_USER_NAME", "hdfs")
    // Both the data path and the ZooKeeper address are mandatory: args(1) is
    // read further below, so checking only for zero arguments would crash later.
    if (args.length < 2) {
      log.error("Please input metric degradation path and zookeeper path!")
      System.exit(1)
    }
    val spark = SparkSession.builder().appName("MetricDegradationModel")
      .getOrCreate()
    spark.sparkContext.setLogLevel("INFO")
    import spark.implicits._
    val dataPath = args(0)
    val fs = FileSystem.get(spark.sparkContext.hadoopConfiguration)
    val files = fs.listStatus(new Path(dataPath))
    // Build one (label, features) RDD per input file; the file name encodes the
    // sample date, turned into epoch seconds at midnight UTC+8.
    val perFileRdds = files.map { status =>
      val filePath = status.getPath
      val timeArr = filePath.getName.split("-")
      val time = LocalDateTime
        .of(timeArr(0).toInt, timeArr(1).toInt, timeArr(2).toInt, 0, 0)
        .toEpochSecond(ZoneOffset.of("+8"))
      spark.sparkContext.textFile(filePath.toString).map { line =>
        val ss = line.split("\\t")
        // label = metric value; features = (component id, sample time)
        (ss(1).toDouble,
          Vectors.dense(DeviceNumber.fromHexString(ss(0)).getComponentIdLong.toDouble, time.toDouble))
      }
    }
    // Union all per-file RDDs in a single call instead of chaining pairwise
    // unions, which keeps the RDD lineage flat.
    val unionRdd = spark.sparkContext.union(perFileRdds.toSeq)
    val training = unionRdd.toDF("label", "features")
    val lr = new LinearRegression()
      .setMaxIter(5000)
      .setRegParam(0.3)
      .setElasticNetParam(0.7)
    val lrModel = lr.fit(training)
    // Log the training model summary.
    log.info(s"=========================${LocalDate.now}=========================")
    files.foreach { f =>
      log.info(s"劣化数据路径:${f.getPath}")
    }
    log.info("Model Summary:")
    val trainingSummary = lrModel.summary
    log.info(s"numIterations: ${trainingSummary.totalIterations}")
    log.info(s"objectiveHistory: [${trainingSummary.objectiveHistory.mkString(",")}]")
    log.info(s"RMSE: ${trainingSummary.rootMeanSquaredError}")
    log.info(s"r2: ${trainingSummary.r2}")
    // Persist the model to the path configured in ZooKeeper.
    val zkClient = new ZookeeperClient
    val zkAddr = args(1)
    val zk = zkClient.getZk(zkAddr, 30000)
    val prop = zkClient.getAll(zk, "/conf_htiiot/metricpredict")
    lrModel.write.overwrite().save(prop.getProperty("model.path"))
    spark.stop()
  }
}
