package com.ctbri.manage.quality.original;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * Base class for Spark-based data-quality checks.
 *
 * <p>Holds the connection/configuration state for one of three source kinds
 * (flat file, Hive table, or JDBC table — selected by which constructor is
 * used) and provides shared helpers to create a {@link SparkSession}, load
 * the data, measure it, and persist results.</p>
 *
 * <p>Thread-safety: instances are mutable (Lombok setters) and not
 * thread-safe.</p>
 */
public abstract class CheckBasic implements BasicOp {
    @Getter @Setter protected String appname;
    @Getter @Setter protected String masterSet;
    @Getter @Setter protected String logLevel;

    // Flat-file source (excel/csv/json/txt)
    @Getter @Setter protected String dataExportPath = null;
    @Getter @Setter protected String dataSavePath = null;
    @Getter @Setter protected String dataSaveStyle = null;

    // JDBC source
    @Getter @Setter protected String url = null;
    @Getter @Setter protected String driver = null;
    @Getter @Setter protected String user = null;
    @Getter @Setter protected String password = null;
    @Getter @Setter protected String dbtable = null;

    // Hive source
    @Getter @Setter protected String databaseName = null;
    @Getter @Setter protected String tableName = null;

    // static final: the logger is always keyed to CheckBasic.class, so a
    // per-instance field only wastes a lookup per construction.
    protected static final Logger logger = LoggerFactory.getLogger(CheckBasic.class);

    /**
     * Constructor for flat-file sources (excel, csv, json, txt).
     *
     * @param appName        Spark application name
     * @param masterSet      Spark master URL (e.g. {@code local[*]})
     * @param logLevel       Spark log level (e.g. {@code WARN})
     * @param dataExportPath path of the input file to check
     * @param dataSavePath   path to write the check result to
     * @param dataSaveStyle  output format passed to {@link #resultSave}
     */
    protected CheckBasic(String appName, String masterSet, String logLevel, String dataExportPath,
               String dataSavePath, String dataSaveStyle){
        this.appname = appName;
        this.masterSet = masterSet;
        this.logLevel = logLevel;
        this.dataExportPath = dataExportPath;
        this.dataSavePath = dataSavePath;
        this.dataSaveStyle = dataSaveStyle;
    }

    /**
     * Constructor for Hive sources.
     *
     * @param appName       Spark application name
     * @param masterSet     Spark master URL
     * @param logLevel      Spark log level
     * @param databaseName  Hive database containing the table to check
     * @param tableName     Hive table to check
     * @param dataSavePath  path to write the check result to
     * @param dataSaveStyle output format passed to {@link #resultSave}
     */
    protected CheckBasic(String appName, String masterSet, String logLevel, String databaseName, String tableName,
                         String dataSavePath, String dataSaveStyle){
        this.appname = appName;
        this.masterSet = masterSet;
        this.logLevel = logLevel;
        this.databaseName = databaseName;
        this.tableName = tableName;
        this.dataSavePath = dataSavePath;
        this.dataSaveStyle = dataSaveStyle;
    }

    /**
     * Constructor for JDBC sources.
     *
     * @param appName       Spark application name
     * @param masterSet     Spark master URL
     * @param logLevel      Spark log level
     * @param url           JDBC connection URL
     * @param driver        JDBC driver class name
     * @param user          database user
     * @param password      database password
     * @param dbtable       table (or subquery) to read
     * @param dataSavePath  path to write the check result to
     * @param dataSaveStyle output format passed to {@link #resultSave}
     */
    protected CheckBasic(String appName, String masterSet, String logLevel, String url, String driver, String user,
                         String password, String dbtable, String dataSavePath, String dataSaveStyle) {
        this.appname = appName;
        this.masterSet = masterSet;
        this.logLevel = logLevel;
        this.url = url;
        this.driver = driver;
        this.user = user;
        this.password = password;
        this.dbtable = dbtable;
        this.dataSavePath = dataSavePath;
        this.dataSaveStyle = dataSaveStyle;
    }

    /**
     * Creates (or reuses) a SparkSession with the given app name and master,
     * and applies the requested log level.
     *
     * <p>When submitting to a Hadoop/YARN cluster, the {@code master(...)}
     * call should be removed and the master supplied on the command line
     * instead (original author's note, translated).</p>
     */
    @Override
    public SparkSession envSet(String appName, String masterSet, String logLevel) {
        SparkSession spark = SparkSession.builder().appName(appName)
                .master(masterSet)
                .getOrCreate();
        spark.sparkContext().setLogLevel(logLevel);
        return spark;
    }

    /**
     * Creates (or reuses) a Hive-enabled SparkSession. The master is taken
     * from the submit environment rather than set here.
     *
     * <p>NOTE(review): original comment marked this path as untested
     * ("hive 待测试"); {@code masterSet} is accepted but intentionally unused.</p>
     */
    @Override
    public SparkSession envSetHive(String appName, String masterSet, String logLevel){
        SparkSession spark = SparkSession.builder().appName(appName)
                .enableHiveSupport()
                .getOrCreate();
        spark.sparkContext().setLogLevel(logLevel);
        return spark;
    }

    /**
     * Reads an Excel file via the com.crealytics spark-excel data source,
     * inferring the schema from the data.
     *
     * @param path  location of the Excel file
     * @param spark session to read with
     * @return the loaded data; empty cells become nulls
     */
    @Override
    public Dataset<Row> getDataFromExcel(String path, SparkSession spark) {
        return spark.read().format("com.crealytics.spark.excel")
                .option("header", "true")
                .option("treatEmptyValuesAsNulls", "true")
                // infer the schema automatically from the sheet contents
                .option("inferSchema", "true")
                .load(path);
    }
    //TODO: support reading from additional databases

    /**
     * Reads an Excel file via the com.crealytics spark-excel data source
     * with a caller-supplied schema (no inference).
     *
     * @param path       location of the Excel file
     * @param spark      session to read with
     * @param structType explicit schema to apply
     * @return the loaded data; empty cells become nulls
     */
    @Override
    public Dataset<Row> getDataFromExcel(String path, SparkSession spark, StructType structType) {
        return spark.read().format("com.crealytics.spark.excel")
                .option("header", "true")
                .option("treatEmptyValuesAsNulls", "true")
                .schema(structType)
                .load(path);
    }

    /**
     * Reads a table through Spark's JDBC data source.
     *
     * @param spark    session to read with
     * @param url      JDBC connection URL
     * @param driver   JDBC driver class name
     * @param user     database user
     * @param password database password
     * @param dbtable  table name or parenthesized subquery
     * @return the loaded data
     */
    @Override
    public Dataset<Row> getDataFromJdbc(SparkSession spark, String url, String driver, String user, String password, String dbtable){
        return spark.read().format("jdbc")
                .option("url", url)
                .option("driver", driver)
                .option("user", user)
                .option("password", password)
                .option("dbtable", dbtable)
                .load();
    }

    /**
     * Reads a full Hive table via Spark SQL.
     *
     * <p>NOTE(review): the database/table names are concatenated into the
     * SQL text (identifiers cannot be bound as parameters). Callers must
     * only pass trusted, configuration-sourced names.</p>
     *
     * @param spark        Hive-enabled session (see {@link #envSetHive})
     * @param databaseName Hive database name
     * @param tableName    Hive table name
     * @return all rows of the table
     */
    @Override
    public Dataset<Row> getDataFromHive(SparkSession spark, String databaseName, String tableName){
        String sql = "select * from " + databaseName + "." + tableName;
        // parameterized SLF4J logging instead of string concatenation
        logger.warn("SqlText is {}", sql);
        return spark.sql(sql);
    }

    /** Returns the total row count of the dataset (triggers a Spark action). */
    @Override
    public long getDataTotalRowNum(Dataset<Row> data) {
        return data.count();
    }

    /** Returns the number of columns in the dataset's schema. */
    @Override
    public int getDataTotalColNum(Dataset<Row> data) {
        return data.columns().length;
    }

    /**
     * Saves the dataset to {@code path} as a single file in the given format
     * (the caller conventionally passes csv), overwriting any existing output.
     *
     * @param data      dataset to persist
     * @param path      output directory
     * @param fileStyle Spark data-source format name (e.g. {@code csv})
     */
    @Override
    public void resultSave(Dataset<Row> data, String path, final String fileStyle){
        // repartition(1) so the result lands in a single part file
        data.repartition(1).write().option("header", "true").mode("overwrite").format(fileStyle).save(path);
    }
}
