package com.dmall.scf.demo

import java.util.Properties

import org.apache.spark.sql.{DataFrameReader, SaveMode, SparkSession}

/**
 * @description
 * SCF demo: loads a local JSON file into Hive tables and round-trips a
 * MySQL table over JDBC using Spark SQL.
 * @author wangxuexing
 * @date 2019/12/20
 */
object SparkMain {

  // NOTE(review): the JDBC endpoint and credentials are hard-coded so the demo
  // is self-contained — move them to external configuration / a secret store
  // before any non-demo use. They were previously duplicated verbatim in both
  // main() and dataReader(); sharing these constants keeps the two paths
  // consistent. The string values are unchanged.
  private val JdbcUrl =
    "jdbc:mysql://10.248.224.3:11202/dmall_scf?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&failOverReadOnly=false"
  private val JdbcUser = "db_pop_test"
  private val JdbcPassword = "8tHfVWCLe7Giq8"

  /**
   * Demo entry point. Sequentially:
   *  1. builds a local Hive-enabled SparkSession,
   *  2. reads a JSON file and shows it,
   *  3. loads a CSV into the Hive table `key_value` and shows it,
   *  4. rewrites the Hive table `peopleResult` from the JSON data,
   *  5. reads a MySQL table over JDBC, appends its rows back, and shows it.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("local-1576939514234")
      // If not set, defaults to a `spark-warehouse` dir under the project,
      // e.g. C:\data\projects\parquet2dbs\spark-warehouse.
      .config("spark.sql.warehouse.dir", "H:\\data\\spark-ware")
      .enableHiveSupport() // activate Hive support
      .getOrCreate()

    // Hive reads: http://spark.apache.org/docs/latest/sql-data-sources-hive-tables.html
    val df = spark.read.json("H:\\data\\people.json")
    //df.createGlobalTempView("common")
    println("======================common.json============================")
    df.show()
//    spark.sql(s"select * from ")
//    spark.sql("CREATE TABLE IF NOT EXISTS key_value (key INT, value STRING) USING hive")
    // NOTE(review): assumes the Hive table `key_value` already exists (the
    // CREATE TABLE above is commented out) — confirm before running fresh.
    spark.sql("LOAD DATA LOCAL INPATH 'C:/data/spark-ware/source_data/kv1.csv' INTO TABLE key_value")
    val resultDF = spark.sql("SELECT * FROM key_value")
    println("======================key_value============================")
    resultDF.show()

    // Drop-then-append makes the write behave like a full overwrite on
    // repeated runs of this demo.
    spark.sql("drop table if exists peopleResult")
    df
      .write
      .mode(SaveMode.Append)
      .saveAsTable("peopleResult")
    println("======================peopleResult all============================")
    spark.read.format("hive").table("peopleResult").show()
    println("======================peopleResult part============================")
    spark.sql("select * from peopleResult").select("companyId", "grabDate", "tableName").show()

    // JDBC usage: http://spark.apache.org/docs/latest/sql-data-sources-jdbc.html
    val connectionProperties = new Properties()
    connectionProperties.put("user", JdbcUser)
    connectionProperties.put("password", JdbcPassword)
    // customSchema overrides the column types Spark would otherwise infer
    // from the JDBC metadata.
    connectionProperties.put("customSchema", "supplier_code varchar(64), supplier_name varchar(128), grab_date date, field_code varchar(128), field_name varchar(64), field_value varchar(512)")
    val jdbcDF2 = spark.read
      .jdbc(JdbcUrl, "dmall_scf.tyc_field_value", connectionProperties)
    println("======================0============================")
    jdbcDF2.show()

    // NOTE(review): this appends the table's own rows back into the same
    // table, doubling its contents on every run — acceptable only as a
    // write demo.
    jdbcDF2.write.mode(SaveMode.Append)//.option("createTableColumnTypes", "supplier_code varchar(64), supplier_name varchar(128), grab_date date, field_code varchar(128), field_name varchar(64), field_value varchar(512)")
      .jdbc(JdbcUrl, "dmall_scf.tyc_field_value", connectionProperties)
    println("======================01============================")
    spark.read
      .jdbc(JdbcUrl, "dmall_scf.tyc_field_value", connectionProperties).show()

/*    val dr = dataReader(spark)
    val doctor = loadData(dr, Option("dmall_scf.doctor"))
    println("======================1============================")
    doctor.show()
    dataWriterAppend(dr, "dmall_scf.doctor", s"dmall_scf.doctor1")
    val doctor1 = loadData(dr, Option("dmall_scf.doctor1"))
    println("======================2============================")
    doctor1.show()*/

    /*
        import spark.implicits._
        import spark.sql
        sql(" use db")// switch database
        // note: collect_set gathers the grouped values into a set
        val ds=sql("select q_id, collect_set(kp_id) as ids from ods_q_quest_kp_rel where kp_id!=0  group by q_id");
        ds.cache();// cache for reuse below
        println("size:",ds.collect().length)// print the row count
        ds.select("q_id","ids").collect().foreach (
          t =>
          {
            val key=t.getAs[String]("q_id");// read the column mapped above
          val value=t.getAs[Seq[String]]("ids").mkString(",");// read the grouped set from above
            //insert redis
          }
        )
        val t1=System.nanoTime();

        println("insert redis ok! Elapsed time: " + (t1 - t0)/1000/1000 + "ms")*/
    // shut down the session
    spark.stop()
  }

  /**
   * Builds a [[DataFrameReader]] pre-configured for the demo MySQL instance
   * (format "jdbc", driver com.mysql.jdbc.Driver resolved from the URL).
   * The caller still has to supply a `dbtable` option — see [[loadData]].
   *
   * @param spark the active session
   * @return a reader pointed at the shared JDBC endpoint
   */
  def dataReader(spark: SparkSession): DataFrameReader =
    spark
      .read
      .format("jdbc")
      .option("url", JdbcUrl)
      .option("user", JdbcUser)
      .option("password", JdbcPassword)

  /**
   * Loads a DataFrame from the given reader.
   *
   * @param dataReader a configured JDBC reader, see [[dataReader]]
   * @param table      table to read (sets the `dbtable` option); when `None`
   *                   the reader is loaded with its existing options only
   * @return the loaded DataFrame
   */
  def loadData(dataReader: DataFrameReader, table: Option[String] = None) =
    table match {
      case Some(name) => dataReader.option("dbtable", name).load()
      case None       => dataReader.load()
    }

  /**
   * Copies `srcTable` into `table`, appending to any existing rows.
   *
   * @param dataReader a configured JDBC reader, see [[dataReader]]
   * @param srcTable   source table name
   * @param table      destination table name (saved via `saveAsTable`)
   */
  def dataWriterAppend(dataReader: DataFrameReader, srcTable: String, table: String): Unit = {
    loadData(dataReader, Option(srcTable))
      .write
      .mode(SaveMode.Append)
      .saveAsTable(table)
  }

  /**
   * Copies `srcTable` into `table`, replacing any existing contents.
   *
   * @param dataReader a configured JDBC reader, see [[dataReader]]
   * @param srcTable   source table name
   * @param table      destination table name (saved via `saveAsTable`)
   */
  def dataWriterOverwrite(dataReader: DataFrameReader, srcTable: String, table: String): Unit = {
    loadData(dataReader, Option(srcTable))
      .write
      .mode(SaveMode.Overwrite)
      .saveAsTable(table)
  }
}
