package com.ctbri.manage.quality.multi;
import com.ctbri.manage.quality.original.CheckBasic;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.StructType;
import lombok.Getter;
import lombok.Setter;

import java.util.*;
import scala.collection.Seq;

/**
 * Difference check between two tables: computes the rows present in table1
 * but missing from table2 (and vice versa) based on configured join-key columns.
 * @author xiao
 */
public class DifferenceStatic2 extends CheckBasic {
    /** Export path of the second data file (excel/csv/txt/json sources). */
    @Getter @Setter private String dataExportPath2 = null;
    /** Join-key column names of table 1; must be the same size as {@code joinColName2}. */
    @Getter @Setter private ArrayList<String> joinColName1;
    /** Join-key column names of table 2, positionally paired with {@code joinColName1}. */
    @Getter @Setter private ArrayList<String> joinColName2;
    /** Requested join mode; currently unused by {@link #call} (kept for the commented single-key variant). */
    @Getter @Setter private String joinMode;
    /** JDBC table name of the second table. */
    @Getter @Setter private String dbtable2 = null;
    /** Hive table name of the second table. */
    @Getter @Setter private String tableName2 = null;

    /**
     * Constructor for file-based sources (excel, csv, txt, json).
     *
     * @param dataExportPath2 path of the second input file (the first comes from the superclass)
     * @param joinColName1    join-key columns of table 1
     * @param joinColName2    join-key columns of table 2 (same size as {@code joinColName1})
     * @param joinMode        requested join mode (stored; not used by {@link #call})
     */
    public DifferenceStatic2(String appName, String masterSet, String logLevel, String dataExportPath, String dataSavePath,
                             String dataSaveStyle, String dataExportPath2, ArrayList<String> joinColName1, ArrayList<String> joinColName2,
                             String joinMode) {
        super(appName, masterSet, logLevel, dataExportPath, dataSavePath, dataSaveStyle);
        this.dataExportPath2 = dataExportPath2;
        this.joinColName1 = joinColName1;
        this.joinColName2 = joinColName2;
        this.joinMode = joinMode;
    }

    /**
     * Constructor for the JDBC source.
     *
     * @param dbtable2     JDBC table name of the second table (the first comes from the superclass)
     * @param joinColName1 join-key columns of table 1
     * @param joinColName2 join-key columns of table 2 (same size as {@code joinColName1})
     * @param joinMode     requested join mode (stored; not used by {@link #call})
     */
    public DifferenceStatic2(String appName, String masterSet, String logLevel, String url, String driver, String user,
                             String password, String dbtable1, String dbtable2, ArrayList<String> joinColName1, ArrayList<String> joinColName2,
                             String joinMode, String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, url, driver, user, password, dbtable1, dataSavePath, dataSaveStyle);
        this.dbtable2 = dbtable2;
        this.joinColName1 = joinColName1;
        this.joinColName2 = joinColName2;
        this.joinMode = joinMode;
    }

    /**
     * Constructor for the Hive source.
     *
     * @param tableName2   Hive table name of the second table (the first comes from the superclass)
     * @param joinColName1 join-key columns of table 1
     * @param joinColName2 join-key columns of table 2 (same size as {@code joinColName1})
     * @param joinMode     requested join mode (stored; not used by {@link #call})
     */
    public DifferenceStatic2(String appName, String masterSet, String logLevel, String databaseName, String tableName1,
                             String tableName2, ArrayList<String> joinColName1, ArrayList<String> joinColName2,
                             String joinMode, String dataSavePath, String dataSaveStyle) {
        super(appName, masterSet, logLevel, databaseName, tableName1, dataSavePath, dataSaveStyle);
        this.tableName2 = tableName2;
        this.joinColName1 = joinColName1;
        this.joinColName2 = joinColName2;
        this.joinMode = joinMode;
    }

    /**
     * Runs the two-way difference check between the two configured tables.
     * <p>
     * Rows whose join keys appear in table1 but not in table2 are saved to
     * {@code dataSavePath + "1"}; the symmetric difference is saved to
     * {@code dataSavePath + "2"}. Both counts are printed to stdout.
     *
     * @param structType1 optional schema for table1 (excel source only; may be null)
     * @param structType2 optional schema for table2 (excel source only; may be null)
     * @param dataSource  one of "excel", "jdbc", "hive" (csv/txt/json are TODO)
     * @param need1Diff2  must be true — both directions are always computed
     * @param need2Diff1  must be true — both directions are always computed
     * @throws Exception if the data source is unsupported, only one direction was
     *                   requested, the join-key lists differ in size, or loading/saving fails
     */
    public void call(StructType structType1, StructType structType2, final String dataSource, final Boolean need1Diff2, final Boolean need2Diff1) throws Exception {
        System.out.println("dataSource: " + dataSource);
        SparkSession spark = null;
        try {
            // NOTE(review): the previous version eagerly called getDataFromJdbc() before
            // inspecting dataSource, which broke excel/hive configurations where the JDBC
            // fields are null. Frames are now loaded only inside the matching branch.
            final Dataset<Row> dataframe1;
            final Dataset<Row> dataframe2;
            if ("excel".equals(dataSource)) {
                spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                dataframe1 = structType1 != null
                        ? this.getDataFromExcel(this.dataExportPath, spark, structType1)
                        : this.getDataFromExcel(this.dataExportPath, spark);
                dataframe2 = structType2 != null
                        ? this.getDataFromExcel(this.dataExportPath2, spark, structType2)
                        : this.getDataFromExcel(this.dataExportPath2, spark);
            } else if ("jdbc".equals(dataSource)) {
                spark = this.envSet(this.appname, this.masterSet, this.logLevel);
                dataframe1 = this.getDataFromJdbc(spark, this.url, this.driver, this.user, this.password, this.dbtable);
                dataframe2 = this.getDataFromJdbc(spark, this.url, this.driver, this.user, this.password, this.dbtable2);
            } else if ("hive".equals(dataSource)) {
                spark = this.envSetHive(this.appname, this.masterSet, this.logLevel);
                dataframe1 = this.getDataFromHive(spark, this.databaseName, this.tableName);
                dataframe2 = this.getDataFromHive(spark, this.databaseName, this.tableName2);
            } else {
                //TODO: support reading csv, txt and json sources as well
                throw new IllegalArgumentException("unsupported dataSource: " + dataSource);
            }

            if (!need1Diff2 || !need2Diff1) {
                // Both directions are always computed; refuse half-configured requests early
                // instead of swallowing the error and crashing later on null data.
                throw new IllegalArgumentException("need two tables!");
            }
            if (joinColName1.size() != joinColName2.size()) {
                // Previously only logged, after which a broken SQL string was still built.
                throw new IllegalArgumentException("joinColName1.size() != joinColName2.size()");
            }

            dataframe1.createOrReplaceGlobalTempView("dataframe1");
            dataframe2.createOrReplaceGlobalTempView("dataframe2");
            try {
                // table1 \ table2: rows of table1 whose join keys find no match in table2.
                String df1DiffDf2Sql = buildDiffSql("dataframe1", "df1", joinColName1, "dataframe2", "df2", joinColName2);
                System.out.println("query: " + df1DiffDf2Sql);
                Dataset<Row> temp1 = spark.sql(df1DiffDf2Sql);
                final long table1HasTable2NotHas = temp1.count();
                System.out.println("table1 has table2 doesn't have: " + table1HasTable2NotHas);
                this.resultSave(temp1, this.dataSavePath + "1", this.dataSaveStyle);

                // table2 \ table1: the symmetric direction.
                String df2DiffDf1Sql = buildDiffSql("dataframe2", "df2", joinColName2, "dataframe1", "df1", joinColName1);
                System.out.println("query: " + df2DiffDf1Sql);
                Dataset<Row> temp2 = spark.sql(df2DiffDf1Sql);
                final long table2HasTable1NotHas = temp2.count();
                System.out.println("table2 has table1 doesn't have: " + table2HasTable1NotHas);
                this.resultSave(temp2, this.dataSavePath + "2", this.dataSaveStyle);

                //TODO: let callers choose AND/OR between join-key conditions, and support
                //      built-in functions or UDFs in the join condition.
            } finally {
                // Always clean up the global views, even when a query or save fails.
                spark.catalog().dropGlobalTempView("dataframe1");
                spark.catalog().dropGlobalTempView("dataframe2");
            }
        } finally {
            if (spark != null) {
                spark.stop();
            }
        }
    }

    /**
     * Builds the anti-join SQL computing {@code left \ right}: a left join from
     * {@code leftView} to {@code rightView} on the positionally-paired key columns,
     * keeping rows where any right-side key is null, ordered by the left-side keys.
     * Only the join-key columns are selected — that is all a difference report needs.
     * <p>
     * NOTE(review): column names are concatenated into the SQL string; they are
     * expected to come from trusted configuration, not from end-user input.
     *
     * @param leftView   global temp view name of the left table
     * @param leftAlias  SQL alias for the left table (e.g. "df1")
     * @param leftCols   join-key columns of the left table
     * @param rightView  global temp view name of the right table
     * @param rightAlias SQL alias for the right table (e.g. "df2")
     * @param rightCols  join-key columns of the right table (same size as {@code leftCols})
     * @return the difference query, without a trailing semicolon (Spark SQL does not need one)
     */
    private static String buildDiffSql(String leftView, String leftAlias, List<String> leftCols,
                                       String rightView, String rightAlias, List<String> rightCols) {
        List<String> selectParts = new ArrayList<>();
        for (String col : leftCols) {
            selectParts.add(leftAlias + "." + col + " as " + leftAlias + "_" + col);
        }
        for (String col : rightCols) {
            selectParts.add(rightAlias + "." + col + " as " + rightAlias + "_" + col);
        }
        List<String> onParts = new ArrayList<>();
        for (int i = 0; i < leftCols.size(); ++i) {
            onParts.add(leftAlias + "." + leftCols.get(i) + "=" + rightAlias + "." + rightCols.get(i));
        }
        List<String> whereParts = new ArrayList<>();
        for (String col : rightCols) {
            whereParts.add(rightAlias + "." + col + " is null");
        }
        List<String> orderParts = new ArrayList<>();
        for (String col : leftCols) {
            orderParts.add(leftAlias + "." + col);
        }
        return "select " + String.join(", ", selectParts)
                + " from global_temp." + leftView + " " + leftAlias
                + " left join global_temp." + rightView + " " + rightAlias
                + " on " + String.join(" and ", onParts)
                + " where " + String.join(" or ", whereParts)
                + " order by " + String.join(", ", orderParts);
    }
}
