package com.njbdqn.util

import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Helpers for reading/writing Spark data and ML models on HDFS.
 *
 * The HDFS base URL is loaded once from the `hadoop` properties file
 * (key `hadoop_url`); every `path` argument is appended to that base.
 *
 * NOTE(review): method names `readLRModeFromHdfs`/`writeLRModeToHdfs`
 * presumably mean "Model", not "Mode" — kept as-is so existing callers
 * keep compiling.
 */
object HdfsConnection {
  // Loaded eagerly at object initialization; a missing "hadoop_url" key
  // will surface as a NoSuchElementException on first use of param_map.
  val param_map = ReadPropertiesFileTool.readProperty("hadoop")

  /**
   * Writes a DataFrame to HDFS (default Parquet format), overwriting
   * any existing data at the target location.
   *
   * @param path location relative to the configured HDFS base URL
   * @param df   the DataFrame to persist
   */
  def writeDataToHdfs(path: String, df: DataFrame): Unit = {
    df.write.mode(SaveMode.Overwrite).save(param_map("hadoop_url") + path)
  }

  /**
   * Reads a Parquet dataset from HDFS into a DataFrame.
   *
   * @param spark active SparkSession used for reading
   * @param path  location relative to the configured HDFS base URL
   * @return the loaded DataFrame
   */
  def readDataFromHdfs(spark: SparkSession, path: String): DataFrame = {
    spark.read.parquet(param_map("hadoop_url") + path)
  }

  /**
   * Loads a previously saved logistic-regression model from HDFS.
   *
   * @param path location relative to the configured HDFS base URL
   * @return the restored LogisticRegressionModel
   */
  def readLRModeFromHdfs(path: String): LogisticRegressionModel = {
    LogisticRegressionModel.load(param_map("hadoop_url") + path)
  }

  /**
   * Saves a logistic-regression model to HDFS.
   *
   * Uses `save`, which fails if the target path already exists; use
   * `lr.write.overwrite().save(...)` if overwriting is desired.
   *
   * @param lr   the trained model to persist
   * @param path location relative to the configured HDFS base URL
   */
  def writeLRModeToHdfs(lr: LogisticRegressionModel, path: String): Unit = {
    lr.save(param_map("hadoop_url") + path)
  }
}
