package com.edata.bigdata.spark;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;

/**
 * Reads PostgreSQL data into Spark via the JDBC data source and maps
 * untyped rows onto Java bean types.
 *
 * <p>Note: {@link #connector} is not set by the constructor and must be
 * assigned by the caller before {@link #findData(String)} is used.
 */
public class PgReader {
    // Fields kept public for backward compatibility: existing callers assign
    // `connector` (and may touch `session`/`logger`) directly.
    public Logger logger = LoggerFactory.getLogger(this.getClass());
    public PgConnector connector;
    public SparkSession session;

    /**
     * Executes the given SQL query against PostgreSQL through Spark's JDBC reader.
     *
     * @param query SQL text passed to the JDBC {@code query} option
     * @return the query result as an untyped {@code Dataset<Row>}
     * @throws IllegalStateException if {@link #connector} has not been assigned yet
     */
    public Dataset<Row> findData(String query) {
        // Fail fast with a clear message instead of an opaque NPE from inside
        // the option chain when the caller forgot to set the connector.
        if (connector == null) {
            throw new IllegalStateException("connector must be set before calling findData");
        }
        logger.info("Executing SQL: {}", query);
        return session.read().format("jdbc")
                .option("url", connector.getURL())
                .option("user", connector.getJDBC_USER())
                .option("password", connector.getJDBC_PASSWORD())
                .option("query", query)
                .load();
    }

    /**
     * Maps an untyped {@code Dataset<Row>} onto a typed {@code Dataset} of the
     * given bean class using {@code Encoders.bean}.
     *
     * <p>Preserves the legacy contract: a null argument is logged and
     * {@code null} is returned rather than throwing.
     *
     * @param rows  source rows; may be null (logged, returns null)
     * @param clazz target bean class; may be null (logged, returns null)
     * @param <T>   bean type
     * @return the typed dataset, or {@code null} when either argument is null
     */
    public <T> Dataset<T> toBeanMapper(Dataset<Row> rows, Class<T> clazz) {
        if (rows == null) {
            logger.error("rows为空");
            return null;
        }
        if (clazz == null) {
            logger.error("clazz为空");
            return null;
        }
        logger.info("正在从数据类型 Dataset<Row> 映射为 Dataset<{}>", clazz.getSimpleName());
        return rows.as(Encoders.bean(clazz));
    }

    /**
     * Creates a reader bound to the given Spark session.
     *
     * @param session active Spark session used for all reads; must not be null
     * @throws IllegalArgumentException if {@code session} is null
     */
    public PgReader(SparkSession session) {
        // Validate eagerly so a misconfigured caller fails here, not on first read.
        if (session == null) {
            throw new IllegalArgumentException("session must not be null");
        }
        this.session = session;
    }

}
