package com.guchenbo.spark.sql

import com.guchenbo.spark.sql.CsvReader.path
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Scratch-pad snippet for spark-shell experiments: drops and recreates a
 * Spark SQL table backed by an Oracle instance via JDBC (metadata only —
 * the data stays in Oracle), then queries it and prints schema + rows.
 *
 * @author guchenbo
 * @date 2022/2/22
 */
object CreateTableDemo {

  def main(args: Array[String]): Unit = {
    // Local session; the commented .config / .enableHiveSupport lines are the
    // Hive-metastore / cluster variants used in other environments.
    val spark = SparkSession.builder().master("local").appName("CreateTableDemo")
      //      .config("hive.metastore.uris", "thrift://localhost:9083")
      //      .config("hive.metastore.uris", "thrift://ark150:9083")
      //      .config("spark.sql.warehouse.dir", "/Users/guchenbo/opt/data/hive/warehouse")
      //      .config("spark.sql.warehouse.dir", "hdfs:///user/hive/warehouse")
      //      .enableHiveSupport()
      .getOrCreate()

    // Drop any previous definition so the CREATE below starts clean.
    val dropSql =
      """
        |
        |DROP TABLE IF EXISTS turing.oracle_sykj_test
        |""".stripMargin
    spark.sql(dropSql)

    val url = "jdbc:oracle:thin:@10.58.12.6:48161:XE"

    // NOTE(review): credentials are hard-coded; acceptable for a throwaway
    // demo, but move them to config/environment before reusing this anywhere.
    val createSql =
      s"""
         |
         |CREATE TABLE IF NOT EXISTS turing.oracle_sykj_test
         |    USING jdbc
         |    OPTIONS (
         |        url "$url",
         |        dbtable "test",
         |        user "230715",
         |        password "oracle",
         |        driver "oracle.jdbc.driver.OracleDriver"
         |        )
         |    COMMENT 'spark to test01'
         |
         |""".stripMargin
    spark.sql(createSql)

    // --- Earlier experiments, kept commented for reference ------------------
    //    sql = "DROP TABLE IF EXISTS turing_monitor.t11"
    //    spark.sql(sql)
    //
    //    sql = "CREATE TABLE IF NOT EXISTS turing_monitor.t11 (`姓名` STRING) USING hive"
    //    spark.sql(sql)
    //
    //    sql = "select * from turing.oracle_sykj_test"
    //    spark.sql(sql).show()
    //
    //    val ds = spark.read.option("sep", ";")
    //      .option("header", "true")
    //      .csv(path("chinese.csv"))
    //
    //    ds.printSchema()
    //
    //    sql = "DROP TABLE IF EXISTS turing_monitor.t2"
    //    spark.sql(sql)
    //    ds.write.saveAsTable("turing_monitor.t2")
    //
    //    sql = "select `姓名` from turing_monitor.t2"
    //    spark.sql(sql).show()

    // Query the freshly created JDBC-backed table and dump its schema as JSON.
    val sqlText = "select * from turing.oracle_sykj_test"
    val df = spark.sql(sqlText)
    println(df.schema.json)

    // Large feature-binning query experiment, kept commented for reference.
    // (Originally the continuation line of this string literal was left
    // uncommented, which broke compilation — fixed by commenting it out too.)
    //    sqlText =
    //      """
    //        |with ds_t737 as ( select * from turing.feature_data )select bin_name,dim_name,dim_val,feature_name,sum(label) bc,sum(1-label) gc,substr(dt,1,4) year,concat(substr(dt,1,4),'Q',cast(1+(cast(substr(dt,6,2) as int)-1)/3 as int)) qur,substr(dt,1,7) mon,dt from (( select feature_name,feature_value,range,bin_name,dim_name,dim_val,label,dt from (( select concat(',',range,',') bin_range0,* from (select stack(16,'0','0','0','DFLT_VAL','apply_city','自定义','0.1','01.DFLT_VAL','single_value','null','null','0','0','0','上海','apply_city','自定义','0.2','02.上海','single_value','null','null','0','0','0','杭州,南京,苏州','apply_city','自定义','0.03','03.杭州,南京,苏州','enum','null','null','0','0','0','宁波,温州','apply_city','自定义','0.05','04.宁波,温州','enum','null','null','0','0','0','绍兴,嘉兴','apply_city','自定义','0.05','05.绍兴,嘉兴','enum','null','null','0','0','0','南昌,长沙','apply_city','自定义','0.1','06.南昌,长沙','enum','null','null','0','0','0','重庆,成都','apply_city','自定义','0.25','07.重庆,成都','enum','null','null','0','0','0','丽水,诸暨','apply_city','自定义','0.02','08.丽水,诸暨','enum','null','null','0','0','0','合肥,武汉','apply_city','自定义','0.02','09.合肥,武汉','enum','null','null','0','0','0','九江,鄱阳','apply_city','自定义','0.08','10.九江,鄱阳','enum','null','null','0','0','0','南昌','apply_city','自定义','0.05','11.南昌','single_value','null','null','0','0','0','海南','apply_city','自定义','0.05','12.海南','single_value','null','null','0','0','0','DFLT_VAL','EAZZ43000011','自定义','0.1','13.DFLT_VAL','single_value','null','null','0','0','0','-1','EAZZ43000011','自定义','0.2','14.-1','single_value','null','null','0','0','0','[0,4)','EAZZ43000011','自定义','0.3','15.[0,4)','range','0','4','0','0','0','[4,+)','EAZZ43000011','自定义','0.4','16.[4,+)','range','4','999999') as (score,p_sample,n_sample,range,variable_name,bin_method,train_percent,bin_name,bin_range_type,low,up)) ) as table_5)  right join (( select 'prod' as dim_name,prod as dim_val,feature_name,feature_value,label,dt from (( select prod,chan,Name as feature_name,label as label,value as feature_value,dt as dt from ds_t737 ) as field_0)  union all select 'chan' as dim_name,chan as dim_val,feature_name,feature_value,label,dt from (( select prod,chan,Name as feature_name,label as label,value as feature_value,dt as dt from ds_t737 ) as field_0)   ) as table_4)  on variable_name = feature_name and ((bin_range_type='range' and feature_value>cast(low as double) and feature_value<=cast(up as double)) or (bin_range_type<>'range' and instr(bin_range0,concat(',',feature_value,','))>0)) ) as table_6)  group by bin_name,range,dim_name,dim_val,feature_name,dt limit 10
    //        |""".stripMargin
    //    df = spark.sql(sqlText)
    //    df.explain()

    // show() is side-effecting — keep the parentheses per Scala convention.
    df.show()

  }

}
