package spark

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.functions.{col, grouping, split}

import java.util.Properties
import scala.util.matching.Regex


/*
  1.spark版本变更为2.3.3，部署模式local即可。也可探索其他模式。
  2.由于远程调试出现的各种问题，且远程调试并非作业重点，这里重新建议使用spark-submit方式
  3.本代码及spark命令均为最简单配置。如运行出现资源问题，请根据你的机器情况调整conf的配置以及spark-submit的参数，具体指分配CPU核数和分配内存。

  调试：
    当前代码中集成了spark-sql，可在开发机如windows运行调试;
    需要在开发机本地下载hadoop，因为hadoop基于Linux编写，在开发机本地调试需要其中的一些文件，如模拟Linux目录系统的winutils.exe；
    请修改System.setProperty("hadoop.home.dir", "your hadoop path in windows like E:\\hadoop-x.x.x")

  部署：
    注释掉System.setProperty("hadoop.home.dir", "your hadoop path in windows like E:\\hadoop-x.x.x")；
    修改pom.xml中<scope.mode>compile</scope.mode>为<scope.mode>provided</scope.mode>
    打包 mvn clean package
    上传到你的Linux机器

    注意在~/.bash_profile文件中配置$SPARK_HOME，并source ~/.bash_profile，或在bin目录下启动spark-submit
    spark-submit Spark2DB-1.0.jar
 */


object Hive2 {
  // Quiet Spark's verbose default logging before the session is created.
  LoggerUtil.setSparkLogLevels()

  /**
   * One-shot ETL job: reads ten tables from Hive over JDBC, applies a
   * per-table cleanup (de-duplication / null-filling / contact pivoting),
   * and appends the results into same-named ClickHouse tables.
   */
  def main(args: Array[String]): Unit = {
    // con_type codes that denote a phone number; any other code is treated
    // as an address when pivoting pri_cust_contact_info.
    val phoneTypes = Set("MOB", "OTH", "TEL")

    // For local debugging on Windows, uncomment and point at a Hadoop
    // install that contains winutils.exe:
    // System.setProperty("hadoop.home.dir", "your hadoop path in windows like E:\\hadoop-x.x.x")

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[2]")
      .set("spark.executor.memory", "4G")

    val session = SparkSession.builder()
      .config(conf)
      .getOrCreate()
    import session.implicits._

    // Hive source; each load() below reuses this reader with a new dbtable.
    val reader = session.read.format("jdbc")
      .option("url", "jdbc:hive2://172.29.4.17:10000/default")
      .option("user", "student")
      .option("password", "nju2022")
      .option("driver", "org.apache.hive.jdbc.HiveDriver")

    // Register the project's Hive JDBC dialect with Spark.
    new RegisterHiveSqlDialect().register()

    // ClickHouse sink connection settings.
    val url = "jdbc:clickhouse://127.0.0.1:8123/dm"
    val pro = new Properties()
    pro.put("driver", "cc.blynk.clickhouse.ClickHouseDriver")
    pro.put("user", "default")
    pro.put("password", "")
    pro.put("compress", "false")

    /** Loads `table` from Hive and strips the "table." prefix the Hive
     *  JDBC driver prepends to every column name. */
    def load(table: String): DataFrame = {
      val raw = reader.option("dbtable", table).load()
      raw.toDF(raw.columns.map(_.substring(table.length + 1)): _*)
    }

    /** Appends `df` to the ClickHouse table named `table`. */
    def write(df: DataFrame, table: String): Unit =
      df.write
        .mode(SaveMode.Append)
        .option("batchsize", "500000")
        .option("isolationLevel", "NONE")
        .option("numPartitions", "1")
        .jdbc(url, table, pro)

    // ---- pri_cust_contact_info: pivot each uid's contact rows into one row
    // ---- holding a ','-joined phone list and a ','-joined address list.
    val contacts = load("pri_cust_contact_info")
      .where($"contact" =!= "无" && $"contact" =!= "" && $"contact" =!= "-")
    println(contacts.count())

    val contactDF = contacts.rdd
      .map { row =>
        val uid = row.getString(0)
        val conType = row.getString(1)
        val contact = row.getString(2)
        // Seed the phone / address buckets from this record's own type.
        val phone = if (phoneTypes.contains(conType)) contact else ""
        val address = if (phoneTypes.contains(conType)) "" else contact
        (uid, (conType, contact, phone, address))
      }
      .reduceByKey { (acc, next) =>
        // Route the incoming record's contact value (next._2) into the
        // bucket matching its con_type (next._1), ';'-separating values.
        if (phoneTypes.contains(next._1))
          (acc._1, acc._2, if (acc._3.isEmpty) next._2 else acc._3 + ";" + next._2, acc._4)
        else
          (acc._1, acc._2, acc._3, if (acc._4.isEmpty) next._2 else acc._4 + ";" + next._2)
      }
      .map { case (uid, (_, _, phones, addresses)) =>
        // De-duplicate each bucket, then re-join with ','.
        (uid, phones.split(";").distinct.mkString(","), addresses.split(";").distinct.mkString(","))
      }
      .toDF("uid", "contact_phone", "contact_address")

    contactDF.show(10, false)
    write(contactDF, "pri_cust_contact_info")

    // ---- Remaining tables: (source/destination name, cleanup step).
    // Fix vs. previous version: the printed/show()n frame is now the same
    // cleaned frame that gets written (sd3/sd4/sd7 were shown while
    // sdd3/sdd4/sdd7 were written).
    val pipelines: Seq[(String, DataFrame => DataFrame)] = Seq(
      ("dm_v_as_djk_info", df =>
        df.dropDuplicates(Seq("card_no")).drop("prod_name")
          .na.fill(0, Seq("bankacct_bal", "bal"))),
      ("dm_v_as_djkfq_info", df => df.dropDuplicates()),
      ("pri_cust_asset_acct_info", df =>
        df.dropDuplicates(Seq("acct_no"))
          .na.fill(0, Seq("avg_mth", "rate", "bal", "avg_qur", "avg_year"))),
      // Fix: "td_3y_bal" / "td_5y_bal" previously carried a stray leading
      // space, so those two columns were never null-filled.
      ("pri_cust_asset_info", df =>
        df.dropDuplicates()
          .na.fill(0, Seq("all_bal", "avg_mth", "avg_qur", "avg_year", "sa_bal",
            "td_bal", "fin_bal", "sa_crd_bal", "td_crd_bal", "sa_td_bal",
            "ntc_bal", "td_3m_bal", "td_6m_bal", "td_1y_bal", "td_2y_bal",
            "td_3y_bal", "td_5y_bal", "oth_td_bal", "cd_bal"))),
      ("pri_cust_base_info", df => df.dropDuplicates()),
      ("pri_cust_liab_acct_info", df =>
        df.na.fill(0, Seq("loan_amt", "loan_bal", "bal", "credit_amt",
          "frst_intr", "actu_intr", "owed_int_in", "owed_int_out",
          "delay_bal", "guar_amount", "guar_eva_value", "guar_con_value"))),
      ("pri_cust_liab_info", df =>
        df.dropDuplicates()
          .na.fill(0, Seq("all_bal", "bad_bal", "due_intr", "norm_bal", "delay_bal"))),
      ("pri_credit_info", df => df.dropDuplicates(Seq("uid"))),
      ("pri_star_info", df => df.dropDuplicates(Seq("uid")))
    )

    for ((table, clean) <- pipelines) {
      val cleaned = clean(load(table))
      println(s"$table: ${cleaned.count()} rows after cleanup")
      cleaned.show()
      write(cleaned, table)
    }

    // stop() releases the SparkContext; close() is an alias for stop(), so
    // calling both (as before) was redundant.
    session.stop()
  }
}
