package com.ctbri.manage.quality.multi;
import com.ctbri.manage.quality.original.CheckBasic;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.api.java.UDF2;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import com.ctbri.manage.bydeequ.calculate.ForJavaCalcResultGeneration;
import java.util.*;
import lombok.Setter;
import lombok.Getter;

/**
 * Per-position equality check across two tables.
 * checkCol: List&lt;List&lt;String&gt;&gt; — each inner list pairs a column from table 1 with
 * the column from table 2 it is compared against.
 * This check may only be used when every primary-key value matches across both
 * tables and no primary-key value is null.
 */
public class TwoTablePerPosEqual extends CheckBasic {

    // Path to the second table's data file (file-based constructor only).
    @Getter @Setter private String dataExportPath2 = null;
    // Name of the second table when reading via JDBC.
    @Getter @Setter private String dbtable2 = null;
    // Name of the second table when reading from Hive.
    @Getter @Setter private String tableName2 = null;

    // Join-key columns of table 1; must have the same length as joinColName2.
    @Getter @Setter private ArrayList<String> joinColName1;
    // Join-key columns of table 2, positionally paired with joinColName1.
    @Getter @Setter private ArrayList<String> joinColName2;
    // Spark join type (e.g. "left", "inner"); currently unused by the commented-out implementation.
    @Getter @Setter private String joinMode;
    // Column pairs to compare; each inner list is [table1Column, table2Column].
    @Getter @Setter private List<ArrayList<String>> checkCol;

    /*
    Constructor for file-based sources: excel, csv, txt, json
    **/
    public TwoTablePerPosEqual(String appName, String masterSet, String logLevel, String dataExportPath,
                               String dataSavePath, String dataSaveStyle, String dataExportPath2,
                               ArrayList<String> joinColName1, ArrayList<String> joinColName2, String joinMode, List<ArrayList<String>> checkCol) {
        super(appName, masterSet, logLevel, dataExportPath, dataSavePath, dataSaveStyle);
        this.dataExportPath2 = dataExportPath2;
        this.joinColName1 = joinColName1;
        this.joinColName2 = joinColName2;
        this.joinMode = joinMode;
        this.checkCol = checkCol;
    }
    /*
    Constructor for a JDBC source.
    **/
    public TwoTablePerPosEqual(String appName, String masterSet, String logLevel, String url, String driver, String user,
                               String password, String dbtable1, String dbtable2,
                               ArrayList<String> joinColName1, ArrayList<String> joinColName2, String joinMode, List<ArrayList<String>> checkCol,
                               String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, url, driver, user, password, dbtable1, dataSavePath, dataSaveStyle);
        this.dbtable2 = dbtable2;
        this.joinColName1 = joinColName1;
        this.joinColName2 = joinColName2;
        this.joinMode = joinMode;
        this.checkCol = checkCol;
    }

    /*
    Constructor for a Hive source.
    **/
    public TwoTablePerPosEqual(String appName, String masterSet, String logLevel, String databaseName, String tableName1,
                               String tableName2, ArrayList<String> joinColName1, ArrayList<String> joinColName2, String joinMode, List<ArrayList<String>> checkCol,
                               String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, databaseName, tableName1, dataSavePath, dataSaveStyle);
        this.tableName2 = tableName2;
        this.joinColName1 = joinColName1;
        this.joinColName2 = joinColName2;
        this.joinMode = joinMode;
        this.checkCol = checkCol;
    }

    /**
     * Entry point for the per-position equality check.
     *
     * NOTE(review): the entire implementation below is commented out, so this
     * method is currently a NO-OP. The commented code is preserved verbatim as
     * the reference implementation; confirm whether it should be restored or
     * removed. It also has several issues to address before reactivation:
     * the finally-blocks continue processing even after a caught exception,
     * the SQL strings are built by concatenation, and the trailing ';' in the
     * generated queries may be rejected by spark.sql().
     *
     * @param structType1      optional explicit schema for table 1 (excel path)
     * @param structType2      optional explicit schema for table 2 (excel path)
     * @param dataSource       one of "excel", "jdbc", "hive" (per the commented code)
     * @param isTesting        when true, dataframes are taken from dfList instead of a source
     * @param dfList           pre-built dataframes used in testing mode
     * @param meetRequirements flag forwarded to the result-generation step
     */
    public void call(StructType structType1, StructType structType2, final String dataSource, final boolean isTesting, List<Dataset<Row>> dfList, final Boolean meetRequirements) throws Exception{
       /* SparkSession spark = null;
        Dataset<Row> dataframe1 = null, dataframe2 = null;
        System.out.println("dataSource: " + dataSource);
        try{
            if (isTesting){
                spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                dataframe1 = dfList.get(0);
                dataframe2 = dfList.get(1);
            }
            else if (dataSource.equals("excel")){
                spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                dataframe1 = structType1 != null?
                        this.getDataFromExcel(this.dataExportPath, spark, structType1): this.getDataFromExcel(this.dataExportPath, spark);
                dataframe2 = structType2 != null?
                        this.getDataFromExcel(this.dataExportPath2, spark, structType2): this.getDataFromExcel(this.dataExportPath2, spark);
            }
            else if (dataSource.equals("jdbc")){
                spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                dataframe1 = this.getDataFromJdbc(spark, this.url, this.driver, this.user, this.password, this.dbtable);
                dataframe2 = this.getDataFromJdbc(spark, this.url, this.driver, this.user, this.password, this.dbtable2);
            }
            else if (dataSource.equals("hive")){
                spark = this.envSetHive(this.appname, this.masterSet, this.logLevel);
                dataframe1 = this.getDataFromHive(spark, this.databaseName, this.tableName);
                dataframe2 = this.getDataFromHive(spark, this.databaseName, this.tableName2);
            }
            //TODO: allow choosing which data source to read from (csv, txt, json)
            else{
                throw new Exception();
            }
        } catch (Exception e){
            e.printStackTrace();
        } finally {
            // Data analysis /////////
            // Join snippet based on a single key
//            Dataset<Row> jointDataframe = dataframe1
//                    .join(dataframe2, dataframe1.col(this.joinColName1).equalTo(dataframe2.col(this.joinColName2)), this.joinMode);
            //
            // Get both tables' join-key list lengths and verify they are equal
            int jcl1Len = joinColName1.size();
            int jcl2Len = joinColName2.size();
            try{
                if (jcl1Len != jcl2Len) throw new Exception();
            } catch (Exception e){
                System.out.println("joinColName1.size() != joinColName2.size()");
                e.printStackTrace();
            } finally { //todo: whether to use a left join or an inner join still needs to be clarified
                // Join snippet based on multiple keys
                dataframe1.createOrReplaceGlobalTempView("dataframe1");
                dataframe2.createOrReplaceGlobalTempView("dataframe2");
                //df1 diff df2
                StringBuffer sqlString = new StringBuffer("select ");
                // Computing the difference set only needs the join-key columns; no need to list every field
                // Inspect both tables' column lists
                for (String name1: dataframe1.columns()) System.out.print(name1 + " ");
                System.out.println();
                for (String name2: dataframe2.columns()) System.out.print(name2 + " ");
                System.out.println();
                //
                for (int i = 0; i < dataframe1.columns().length; ++i) {
                    sqlString.append("df1." + Arrays.stream(dataframe1.columns()).toArray()[i]
                            + " as df1_" + Arrays.stream(dataframe1.columns()).toArray()[i] + ", ");
                }
                for (int i = 0; i < dataframe2.columns().length; ++i) {
                    sqlString.append("df2." + Arrays.stream(dataframe2.columns()).toArray()[i]
                            + " as df2_" + Arrays.stream(dataframe2.columns()).toArray()[i]);
                    if (i < dataframe2.columns().length - 1) sqlString.append(", ");
                }
                sqlString.append(" from global_temp.dataframe1 df1 left join global_temp.dataframe2 df2 on ");
                for (int i = 0; i < jcl1Len; ++i) {
                    sqlString.append("df1." + joinColName1.get(i) + "=" + "df2." + joinColName2.get(i));
                    if (i < jcl1Len - 1) sqlString.append(" and ");
                }
                sqlString.append(";");
                System.out.println("query: " + sqlString);
                assert spark != null;
                Dataset<Row> jointDataframe = spark.sql(sqlString.toString());
                spark.catalog().dropGlobalTempView("dataframe1");
                spark.catalog().dropGlobalTempView("dataframe2");
                //
                try{
//                    System.out.println("check_col size= " + this.check_col.size());
                    if (this.checkCol.size() == 0) throw new Exception();
                    spark.udf().register("checkEqual", new NullCheck(), DataTypes.BooleanType);
                    // When joining on multiple keys, initialize the output table from single-column results
                    jointDataframe.createOrReplaceGlobalTempView("jointDataframe");
                    StringBuffer boolDfSql = new StringBuffer("select ");
                    for (int i = 0; i < jcl1Len; ++i) {
                        boolDfSql.append("df1_" + joinColName1.get(i) + ", ");
                    }
                    List<String> boolColNames = new ArrayList<>();
                    for (int i=0; i<this.checkCol.size(); ++i){
                        boolColNames.add("df1_"+this.checkCol.get(i).get(0) + "_diff_" + "df2_" + this.checkCol.get(i).get(1));
                        boolDfSql.append("checkEqual(" + "df1_" + this.checkCol.get(i).get(0) + ","
                                + "df2_" + this.checkCol.get(i).get(1) + ") "
                                + "df1_"+this.checkCol.get(i).get(0) + "_diff_" + "df2_" + this.checkCol.get(i).get(1));
                        if (i < this.checkCol.size() - 1) boolDfSql.append(", ");
                        else boolDfSql.append(" ");
                    }
                    boolDfSql.append("from global_temp.jointDataframe");
                    boolDfSql.append(" order by ");
                    for (int i=0; i<jcl1Len; ++i){
                        boolDfSql.append("df1_" + joinColName1.get(i));
                        if (i < jcl1Len - 1) boolDfSql.append(", ");
                    }
                    boolDfSql.append(";");
                    System.out.println("query: " + boolDfSql);
                    Dataset<Row> boolDataframe = spark.sql(boolDfSql.toString());
                    assert spark != null;

                    // When joining on a single key, initialize the output table from single-column results
//                    Dataset<Row> boolDataframe = jointDataframe.select(
//                            functions.col(this.joinColName1).alias(this.joinColName1),
//                            functions.callUDF("checkEqual", functions.col(this.checkCol.get(0).get(0)),
//                                    functions.col(this.checkCol.get(0).get(1))).alias(this.checkCol.get(0).get(0)+"_diff")
//                    );
//                    // Then join the remaining columns to be checked
//                    for (int i = 1; i<this.checkCol.size(); ++i){
//                        Dataset<Row> temp = jointDataframe.select(
//                                functions.col(this.joinColName1).alias("temp_"+this.joinColName1),
//                                functions.callUDF("checkEqual", functions.col(this.checkCol.get(i).get(0)),
//                                                functions.col(this.checkCol.get(i).get(1)))
//                                        .alias(this.checkCol.get(i).get(0)+"_diff")
//                        );
////                        temp.show();
//                        boolDataframe = boolDataframe
//                                .join(temp, boolDataframe.col(this.joinColName1).equalTo(temp.col("temp_"+this.joinColName1)), this.joinMode)
//                                .drop("temp_"+this.joinColName1);
////                        boolDataframe.show();
//                    }
                    boolDataframe.show(100, false);
                    // Generate the result list
                    for (String colName: boolColNames){
                        System.out.println(colName);
                    }
                    Dataset<Row> dataResult = ForJavaCalcResultGeneration.twoTablePerPosEqualResultAndDataListGeneration(
                            "Dataset",
                            spark,
                            "x=1.0",
                            this.dataSavePath,
                            boolDataframe,
                            boolColNames.toArray(new String[boolColNames.size()]),
                            joinColName1.toArray(new String[joinColName1.size()]),
                            meetRequirements
                    );
                    System.out.println(dataResult);
                    this.resultSave(dataResult, this.dataSavePath+"/dataList", this.dataSaveStyle);
                    spark.catalog().dropGlobalTempView("jointDataframe");
                } catch (Exception e){
                    e.printStackTrace();
                } finally {
                    spark.stop();
                }
            }
        }*/

    }
}

/**
 * Spark SQL UDF comparing two string columns for equality, treating two nulls
 * as equal (unlike SQL's {@code =}, where comparisons involving null yield null).
 * Registered as "checkEqual" by {@link TwoTablePerPosEqual}.
 */
class NullCheck implements UDF2<String, String, Boolean> {
    /**
     * @param col1 value from the first table's column (may be null)
     * @param col2 value from the second table's column (may be null)
     * @return true when both values are null or both are equal strings
     */
    @Override
    public Boolean call(String col1, String col2) throws Exception {
        // Objects.equals implements exactly the hand-rolled null-safe comparison
        // the original spelled out: both null -> true, one null -> false,
        // otherwise String.equals.
        return Objects.equals(col1, col2);
    }
}
