package com.guchenbo.spark.sql

import org.apache.spark
import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession}

import scala.util.control.NonFatal

/**
 * Ad-hoc code snippet used for manual testing against spark-shell.
 *
 * @author guchenbo
 * @date 2022/2/22
 */
object SnippetDemo {

  /**
   * Fragment of the error message Spark raises when a JDBC source rejects a
   * user-specified schema; used to decide whether a failed query is worth a
   * single drop-and-recreate retry.
   */
  val SCHEMA_ERROR = "does not allow user-specified schemas"

  /**
   * Entry point for manual testing: builds a local Hive-enabled session,
   * prints a sample JDBC-table DDL for reference, runs one query and shows
   * its plan and rows.
   */
  def main(args: Array[String]): Unit = {
    //    System.getenv().put("HADOOP_CONF_DIR","/Users/guchenbo/opt/etc/hadoop/ark32")

    val spark = SparkSession.builder().master("local").appName("demo")
      .config("hive.metastore.uris", "thrift://ark150:9083")
      //      .config("spark.sql.warehouse.dir", "hdfs:///user/hive/warehouse")
      .enableHiveSupport().getOrCreate()

    try {
      // Sample DDL for a Spark table backed by an external PostgreSQL table via
      // JDBC. Only printed for reference here — it is never executed.
      val createTableSql =
        """
          |CREATE TABLE turing_monitor.gp_sales2
          |(
          |    prod_id     int,
          |    amount_sold int
          |)
          |    USING JDBC
          |    OPTIONS (
          |        url "jdbc:postgresql://10.58.11.12:5432/pgtest0611",
          |        dbtable "public.sales",
          |        USER 'pgadmin',
          |        password 'pgadmin123',
          |        driver 'org.postgresql.Driver'
          |        )
          |    COMMENT '连接到外部数据库的表'
          |    TBLPROPERTIES ('origin' = 'public.sales')
          |""".stripMargin
      println(createTableSql)

      // Earlier experiments, kept for reference (the original overwrote a `var`
      // with each of these; only the last one ever ran):
      //    "SELECT * FROM turing_monitor.gp_sales"
      //    "SELECT * FROM turing_monitor.gp_public_model_report_psi_test_df LIMIT 1"
      //    "SELECT * FROM turing.model_report_psi_col_df"
      //    "INSERT OVERWRITE TABLE turing_monitor.string_test_par PARTITION (ds = '2022-04-08') SELECT y_tag,score,date FROM turing_monitor.string_test WHERE ds='2021-10-22' LIMIT 100"
      val sql = "SELECT * FROM turing_monitor.string_test_par"
      val df = spark.sql(sql)
      df.explain(true)
      df.show()
      //    df.write.saveAsTable("turing.model_report_psi_col_df3")
      //    spark.sql("create database usertest")
      //    executeSparkSql(spark, sql, dropFirst = true, retry = false)
    } finally {
      // Release the local session's resources even if the query fails.
      spark.stop()
    }
  }

  /**
   * Executes `sql`, optionally recreating the backing JDBC table first, and
   * retries once on a "user-specified schemas" error.
   *
   * Fixes over the original version:
   *   - the drop/create DDL is now actually executed (it used to be built and
   *     silently discarded);
   *   - a successful retry returns its DataFrame (it used to be discarded and
   *     the original exception rethrown unconditionally);
   *   - only non-fatal throwables are caught, so OOM/interrupts propagate.
   *
   * @param spark     active session to run statements on
   * @param sql       the query to execute
   * @param dropFirst when true, drop and recreate `turing_monitor.gp_test01` first
   * @param retry     when true, retry once (with dropFirst = true) on a schema error
   * @return the DataFrame of the final execution of `sql`
   */
  def executeSparkSql(spark: SparkSession, sql: String, dropFirst: Boolean, retry: Boolean):
  DataFrame = {
    try {
      if (dropFirst) {
        val ddlSql =
          """
            |
            |DROP TABLE IF EXISTS turing_monitor.gp_test01;
            |
            |CREATE TABLE IF NOT EXISTS turing_monitor.gp_test01
            |    USING jdbc
            |    OPTIONS (
            |        url "jdbc:postgresql://10.57.36.55:5432/shangqi_db",
            |        dbtable "public.test01",
            |        user "gpadmin",
            |        password "gpadmin123",
            |        driver "org.postgresql.Driver",
            |        customSchema "a double,b STRING"
            |        )
            |    COMMENT 'spark to test01';
            |
            |""".stripMargin
        // spark.sql accepts a single statement at a time, so split the script
        // on ';' and run each non-empty statement in order.
        ddlSql.split(";").map(_.trim).filter(_.nonEmpty).foreach(spark.sql)
      }
      // Executed once before and once after the REFRESH — presumably to force a
      // re-read of refreshed metadata; TODO confirm the double execution is intended.
      spark.sql(sql)
      spark.sql("REFRESH TABLE turing_monitor.gp_public_model_report_psi_test_df")
      //      spark.sql("clear cache")
      spark.sql(sql)
    } catch {
      case NonFatal(e) =>
        val msg = e.getMessage
        if (retry && msg != null && msg.contains(SCHEMA_ERROR)) {
          println(s"sql error $msg, then retry once")
          // Return the retried result; retry = false prevents infinite recursion.
          executeSparkSql(spark, sql, dropFirst = true, retry = false)
        } else {
          throw e
        }
    }
  }
}
