package spark

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.catalyst.dsl.expressions.{DslExpression, StringToAttributeConversionHelper}
import org.apache.spark.sql.functions._

import scala.collection.mutable.ArrayBuffer


/**
 * Per-table ELT cleansing handler.
 *
 * Each `handle_*` method takes a raw DataFrame whose columns are prefixed
 * with the table name (e.g. `pri_star_info_uid`), strips that prefix,
 * removes duplicate rows and null/empty values, applies table-specific
 * post-processing, and shows the result before returning it.
 */
class ELTHandler {

  /**
   * Strips the `"<tableName>_"` prefix from every column of `df`.
   *
   * Assumes every column name starts with `tableName` followed by one
   * separator character — NOTE(review): confirm against the ingestion layer.
   */
  private def stripTablePrefix(tableName: String, df: DataFrame): DataFrame = {
    val renamed = df.columns.map(_.substring(tableName.length + 1))
    df.toDF(renamed: _*)
  }

  /**
   * Drops fully-duplicated rows, then removes rows where any of
   * `columnNames` is null or the empty string.
   *
   * @param tableName   source table name (kept for interface compatibility;
   *                    not used by the filtering itself)
   * @param df          input DataFrame
   * @param columnNames columns that must be non-null and non-empty
   * @return the cleansed DataFrame
   */
  def deleteNullAndDup(tableName: String, df: DataFrame, columnNames: Array[String]): DataFrame = {
    columnNames.foldLeft(df.dropDuplicates()) { (acc, colName) =>
      println(colName) // trace which column is being cleansed
      // null check and empty-string check folded into one predicate
      acc.filter(col(colName).isNotNull && col(colName) =!= "")
    }
  }

  // Handles pri_cust_liab_info.
  // CREATE TABLE dm.pri_cust_liab_info (
  //   uid String, all_bal decimal(18,2), bad_bal decimal(18,2), due_intr decimal(18,2),
  //   norm_bal decimal(18,2), delay_bal decimal(18,2), etl_dt String
  // )
  def handle_pri_cust_liab_info(df: DataFrame): DataFrame = {
    val tableName = "pri_cust_liab_info"
    val renamed = stripTablePrefix(tableName, df)
    val cleansed = deleteNullAndDup(tableName, renamed, renamed.columns)

    // Aggregate the four balance columns per user and keep only the most
    // recent etl_dt for each uid.
    val result = cleansed.groupBy("uid").agg(
      sum("all_bal").alias("all_bal"),
      sum("bad_bal").alias("bad_bal"),
      sum("due_intr").alias("due_intr"),
      sum("norm_bal").alias("norm_bal"),
      max("etl_dt").alias("etl_dt")
    )

    result.show()
    result
  }

  // Handles pri_cust_liab_acct_info.
  // NOTE(review): original comment flagged this table as memory-heavy; only a
  // subset of key columns is null-filtered for that reason.
  def handle_pri_cust_liab_acct_info(df: DataFrame): DataFrame = {
    val tableName = "pri_cust_liab_acct_info"
    val renamed = stripTablePrefix(tableName, df)

    // Key columns that must be present; other columns may legitimately be empty.
    val requiredColumns = Array(
      "cust_no",
      "loan_cust_no",
      "cust_name",
      "uid",
      "acct_no"
    )

    val result = deleteNullAndDup(tableName, renamed, requiredColumns)
    // No additional table-specific processing.

    result.show()
    result
  }

  // Handles pri_cust_base_info.
  def handle_pri_cust_base_info(df: DataFrame): DataFrame = {
    val tableName = "pri_cust_base_info"
    val renamed = stripTablePrefix(tableName, df)

    val cleansed = deleteNullAndDup(tableName, renamed, renamed.columns)

    // Keep a single row per uid and per cust_no.
    // NOTE(review): which row survives each dropDuplicates is nondeterministic
    // in Spark — confirm downstream consumers do not rely on a specific row.
    val result = cleansed
      .dropDuplicates("uid")
      .dropDuplicates("cust_no")

    result.show()
    result
  }

  // Handles pri_star_info.
  // uid        -- certificate (ID) number
  // star_level -- customer star rating
  def handle_pri_star_info(df: DataFrame): DataFrame = {
    val tableName = "pri_star_info"
    val renamed = stripTablePrefix(tableName, df)

    val cleansed = deleteNullAndDup(tableName, renamed, renamed.columns)

    // One row per uid; discard the -1 sentinel star level.
    val result = cleansed
      .dropDuplicates("uid")
      .filter(col("star_level") =!= -1)

    result.show()
    result
  }

  // Handles dm_v_as_djk_info (debit card info).
  def handle_dm_v_as_djk_info(df: DataFrame): DataFrame = {
    val tableName = "dm_v_as_djk_info"
    val renamed = stripTablePrefix(tableName, df)

    // Key columns that must be present and jointly unique.
    val keyColumns = Array(
      "acct_no",
      "card_no",
      "cust_name",
      "uid"
    )

    val result = deleteNullAndDup(tableName, renamed, keyColumns)
      .dropDuplicates(keyColumns)

    result.show()
    result
  }

  // Handles dm_v_as_djkfq_info (debit card installment info).
  def handle_dm_v_as_djkfq_info(df: DataFrame): DataFrame = {
    val tableName = "dm_v_as_djkfq_info"
    val renamed = stripTablePrefix(tableName, df)

    // Key columns that must be present and jointly unique.
    val keyColumns = Array(
      "acct_no",
      "card_no",
      "cust_name",
      "uid"
    )

    val result = deleteNullAndDup(tableName, renamed, keyColumns)
      .dropDuplicates(keyColumns)

    result.show()
    result
  }

  // Handles pri_credit_info.
  def handle_pri_credit_info(df: DataFrame): DataFrame = {
    val tableName = "pri_credit_info"
    val renamed = stripTablePrefix(tableName, df)

    val cleansed = deleteNullAndDup(tableName, renamed, renamed.columns)

    // Negative credit levels are invalid; keep only non-negative ones.
    val result = cleansed.filter(col("credit_level") >= 0)

    result.show()
    result
  }

  // Handles pri_cust_asset_acct_info.
  // This table cannot be blanket-filtered: only the key columns below are
  // required to be non-null.
  def handle_pri_cust_asset_acct_info(df: DataFrame): DataFrame = {
    val tableName = "pri_cust_asset_acct_info"
    val renamed = stripTablePrefix(tableName, df)

    // Key columns that must be present and jointly unique.
    val keyColumns = Array(
      "cust_no",
      "cust_name",
      "uid",
      "acct_no",
      "card_no"
    )

    val result = deleteNullAndDup(tableName, renamed, keyColumns)
      .dropDuplicates(keyColumns)

    result.show()
    result
  }

  // Handles pri_cust_asset_info.
  def handle_pri_cust_asset_info(df: DataFrame): DataFrame = {
    val tableName = "pri_cust_asset_info"
    val renamed = stripTablePrefix(tableName, df)

    // Key columns that must be present and jointly unique.
    val keyColumns = Array(
      "cust_no",
      "cust_name",
      "uid",
      "belong_org",
      "exam_org"
    )

    val result = deleteNullAndDup(tableName, renamed, keyColumns)
      .dropDuplicates(keyColumns)

    result.show()
    result
  }

  // Handles pri_cust_contact_info: drops useless contact values and collapses
  // each uid's contacts into one row with phone and address lists.
  def handle_pri_cust_contact_info(df: DataFrame): DataFrame = {
    val tableName = "pri_cust_contact_info"
    val renamed = stripTablePrefix(tableName, df)

    // Contact types treated as phone numbers; everything else is an address.
    val phoneTypes = Seq("TEL", "OTH", "MOB")

    // Discard empty/placeholder contacts ("", "-", "无" = "none"), then merge
    // all of a uid's contacts into comma-separated phone and address columns.
    // collect_set deduplicates; nulls produced by the unmatched `when` branch
    // are ignored by collect_set.
    val result = renamed
      .filter(col("contact").isNotNull
        && col("contact") =!= ""
        && col("contact") =!= "-"
        && col("contact") =!= "无")
      .groupBy(col("uid"))
      .agg(
        concat_ws(",", collect_set(when(col("con_type").isin(phoneTypes: _*), col("contact")).otherwise(null))).alias("contact_phone"),
        concat_ws(",", collect_set(when(!col("con_type").isin(phoneTypes: _*), col("contact")).otherwise(null))).alias("contact_address")
      )

    result.printSchema()
    result.show()
    result
  }
}
