package org.calrissian.spark.jetty.service

import java.io.FileInputStream
import java.net.{URI, URL}
import java.util.Properties

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
/**
  * Service that exports a JDBC table (connection settings taken from a
  * `db.properties` file on the classpath) to a file on a Hadoop-compatible
  * file system (HDFS, local, S3A, ...).
  *
  * Expected keys in `db.properties`: `db.url`, `db.username`, `db.password`,
  * `db.driverClassName`.
  *
  * @param sparkContext active SparkContext used to build the SQLContext
  */
class PutDataService(sparkContext: SparkContext) {

  /**
    * Reads the given table from the default JDBC database and writes it to
    * `path/table_name.format` in the requested output format.
    *
    * NOTE(review): method name predates the lowerCamelCase convention; kept
    * as-is for caller compatibility.
    *
    * @param table_name name of the JDBC table to read
    * @param path       target directory URI (e.g. "hdfs://host/dir"); also used
    *                   to resolve the destination FileSystem
    * @param format     Spark output format (e.g. "parquet", "json"), doubled as
    *                   the file-name extension
    * @return "Path Exist" if the destination already exists (nothing written),
    *         "Success" after a successful write
    * @throws IllegalArgumentException if db.properties is not on the classpath
    */
  def PutDataFromDefaultDataBase(table_name: String, path: String, format: String): String = {

    val fullPath = s"$path/$table_name.$format"
    val sqlContext = new SQLContext(sparkContext)

    // Load JDBC connection settings; fail fast with a clear message instead of
    // the NPE the original produced when the resource was missing.
    val resource = Thread.currentThread().getContextClassLoader.getResource("db.properties")
    require(resource != null, "db.properties not found on classpath")
    val props = new Properties()
    // Close the stream after loading (the original leaked this FileInputStream).
    val in = new FileInputStream(resource.getPath)
    try props.load(in) finally in.close()

    val df = sqlContext.read.format("jdbc")
      .option("url", props.getProperty("db.url"))
      .option("dbtable", table_name)
      .option("user", props.getProperty("db.username"))
      .option("password", props.getProperty("db.password"))
      .option("driver", props.getProperty("db.driverClassName"))
      .load
    df.show() // debug/inspection output, preserved from the original

    // Resolve the FileSystem for the target URI and refuse to overwrite.
    val fs = FileSystem.get(new URI(path), new Configuration())
    if (fs.exists(new Path(fullPath))) "Path Exist"
    else {
      df.write.format(format).save(fullPath)
      "Success"
    }
  }

}
