package com.edata.bigdata.postgres
import com.edata.bigdata.annotations.Edata_Loader
import com.edata.bigdata.basic.Loader
import org.apache.spark.sql.{DataFrame, SparkSession}

@Edata_Loader(target = "PGLOADER")
class PgLoader extends Loader with PgConnector {
  // Spark session and source tag are injected by the loader framework (hence var + override).
  override var session: SparkSession = _
  override var sourceType: String = "PG"

  /** Executes a SQL query against PostgreSQL through Spark's JDBC reader.
    *
    * Connection settings (`JDBC_*`) and `LOGGER` are supplied by the mixed-in
    * `PgConnector` trait — assumed configured before `find` is called; TODO confirm.
    *
    * @param args the SQL query text is expected as the first element; any
    *             additional elements are ignored
    * @return the query result as a [[DataFrame]], or `null` when `args` is
    *         empty (null is kept for backward compatibility with existing
    *         callers; an Option/Either return would be the idiomatic fix)
    */
  override def find(args: String*): DataFrame = {
    // Single match expression instead of an early `return null` — the whole
    // body evaluates to the DataFrame (or null), avoiding Scala's `return`.
    args.headOption match {
      case None =>
        LOGGER.error("args should not be empty")
        null
      case Some(sql) =>
        LOGGER.info(s"executing SQL:$sql")
        session.read.format("jdbc")
          // Interpolation instead of `+` concatenation, matching the log line above.
          .option("url", s"$JDBC_PREFIX$JDBC_IP:$JDBC_PORT/$JDBC_DATABASE")
          .option("user", JDBC_USER)
          .option("password", JDBC_PASSWORD)
          .option("query", sql)
          .load()
    }
  }
}
