package com.njbdqn.util

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * HDFS utility operations: persisting and loading Spark DataFrames and
  * LogisticRegression models under a configured HDFS base URL.
  *
  * The base URL is read once from the "hadoop" properties file under the
  * key "hadoop_url"; every path argument is resolved relative to it.
  */
object HDFSConnection {

  /** Configuration loaded from the "hadoop" properties file. */
  val map = ReadPropertiesFileTool.readProperty("hadoop")

  /**
    * Resolves a relative path against the configured HDFS base URL.
    *
    * Fails fast with a descriptive message when "hadoop_url" is absent,
    * instead of the opaque "None.get" thrown by Option#get (the exception
    * type, NoSuchElementException, is unchanged for existing callers).
    */
  private def resolve(path: String): String =
    map.getOrElse(
      "hadoop_url",
      throw new NoSuchElementException(
        "Missing required property 'hadoop_url' in hadoop configuration")
    ) + path

  /**
    * Writes a DataFrame to HDFS at the given relative path,
    * overwriting any existing data at that location.
    *
    * @param path path relative to the configured HDFS base URL
    * @param df   the DataFrame to persist
    */
  def writeDataToHDFS(path: String, df: DataFrame): Unit = {
    df.write.mode(SaveMode.Overwrite).save(resolve(path))
  }

  /**
    * Reads a parquet dataset from the given relative HDFS path into a DataFrame.
    *
    * @param spark the active SparkSession
    * @param path  path relative to the configured HDFS base URL
    * @return the loaded DataFrame
    */
  def readDataFromHDFS(spark: SparkSession, path: String): DataFrame = {
    spark.read.parquet(resolve(path))
  }

  /**
    * Loads a previously saved LogisticRegression model from HDFS.
    *
    * @param path path relative to the configured HDFS base URL
    * @return the loaded model
    */
  def readLRModelFromHDFS(path: String): LogisticRegressionModel = {
    LogisticRegressionModel.load(resolve(path))
  }

  /**
    * Saves a LogisticRegression model to HDFS at the given relative path.
    *
    * Note: uses save(), which fails if the target already exists; use
    * lr.write.overwrite().save(...) at call sites that need to replace
    * an existing model.
    *
    * @param lr   the trained model to persist
    * @param path path relative to the configured HDFS base URL
    */
  def writeLRModelToHDFS(lr: LogisticRegressionModel, path: String): Unit = {
    lr.save(resolve(path))
  }
}
