package com.ctbri.manage.quality.single;
import com.ctbri.manage.quality.original.CheckBasic;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.*;

public class CheckColumnNullType extends CheckBasic {

    /**
     * Constructor for file-based sources (excel, csv, json, txt).
     *
     * @param dataExportPath path of the file to read the data from
     * @param dataSavePath   path where the check result is saved
     * @param dataSaveStyle  format/style used when saving the result
     */
    public CheckColumnNullType(String appName, String masterSet, String logLevel, String dataExportPath,
                               String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, dataExportPath, dataSavePath, dataSaveStyle);
    }

    /**
     * Constructor for JDBC sources.
     *
     * @param url      JDBC connection URL
     * @param driver   JDBC driver class name
     * @param dbtable  table to read
     */
    public CheckColumnNullType(String appName, String masterSet, String logLevel, String url, String driver, String user,
                               String password, String dbtable, String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, url, driver, user, password, dbtable, dataSavePath, dataSaveStyle);
    }

    /**
     * Constructor for Hive sources.
     *
     * @param databaseName Hive database to read from
     * @param tableName    Hive table to read
     */
    public CheckColumnNullType(String appName, String masterSet, String logLevel, String databaseName, String tableName,
                               String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, databaseName, tableName, dataSavePath, dataSaveStyle);
    }

    /**
     * Loads the data from the configured source, filters the rows where the given
     * column is null, saves that result and prints total / not-satisfied counts.
     *
     * <p>Fixes over the previous version: the Spark session and a JDBC dataset are
     * no longer created eagerly before {@code dataSource} is inspected (which
     * attempted a JDBC read even for excel/hive inputs, and analyzed stale JDBC
     * data in {@code finally} when the source name was unrecognized); stray double
     * semicolons removed; a raw {@code new Exception()} replaced with a descriptive
     * {@link IllegalArgumentException}.
     *
     * @param structType       optional schema, only used for the excel source; may be null
     * @param dataSource       one of "excel", "jdbc", "hive" (csv/txt/json not implemented yet)
     * @param needCheckColName name of the column checked for null values
     * @throws Exception declared for interface compatibility; load failures are
     *                   currently caught and printed (best-effort behavior preserved)
     */
    public void call(StructType structType, final String dataSource, final String needCheckColName) throws Exception {
        System.out.println("dataSource: " + dataSource);
        SparkSession spark = null;
        Dataset<Row> dataframe = null;
        try {
            switch (dataSource) {
                case "excel":
                    spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                    dataframe = structType != null
                            ? this.getDataFromExcel(this.dataExportPath, spark, structType)
                            : this.getDataFromExcel(this.dataExportPath, spark);
                    break;
                case "jdbc":
                    spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                    dataframe = this.getDataFromJdbc(spark, this.url, this.driver, this.user, this.password, this.dbtable);
                    break;
                case "hive":
                    // Hive needs a session with Hive support enabled.
                    spark = this.envSetHive(this.appname, this.masterSet, this.logLevel);
                    dataframe = this.getDataFromHive(spark, this.databaseName, this.tableName);
                    break;
                // TODO: support selecting csv, txt and json sources as well
                default:
                    throw new IllegalArgumentException("Unsupported dataSource: " + dataSource);
            }
        } catch (Exception e) {
            // Best-effort behavior kept from the original: report and fall through;
            // the analysis below is skipped because dataframe stays null.
            e.printStackTrace();
        } finally {
            // Analysis section: only runs when a source was successfully loaded.
            if (dataframe != null && spark != null) {
                Dataset<Row> result = dataframe.filter(functions.col(needCheckColName).isNull());
                result.show(false);
                this.resultSave(result, this.dataSavePath, this.dataSaveStyle);
                System.out.println("original data's total number: " + dataframe.count());
                System.out.println("not satisfied number: " + result.count());
                spark.stop();
            }
        }
    }
}
