package com.mshbj.choperator.scalacode

import java.util.Properties

import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * sparksql操作clickhouse
 */
object SparkSQLOparateCH {

  /**
   * Demo entry point: builds a small DataFrame from in-memory JSON strings
   * and appends it to the ClickHouse table `newdb.t_java` over JDBC.
   *
   * Note: connection details (host, db, table, credentials) are hard-coded
   * for demo purposes.
   */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (local mode for this demo).
    val session: SparkSession = SparkSession.builder().master("local").appName("test").getOrCreate()

    try {
      // Sample rows as JSON strings; `read.json` infers the schema from them.
      val list = List[String](
        """{"id":6,"name":"t1","age":10}""",
        """{"id":7,"name":"t2","age":11}""",
        """{"id":8,"name":"t3","age":12}""",
        """{"id":9,"name":"t4","age":13}""",
        """{"id":10,"name":"t5","age":14}"""
      )

      import session.implicits._
      val frame: DataFrame = session.read.json(list.toDS())
      frame.show()

      // Save `frame` into ClickHouse via the JDBC datasource.
      val props = new Properties()
      props.setProperty("user", "default")
      props.setProperty("password", "")
      frame.write
        .mode(SaveMode.Append)
        .option(JDBCOptions.JDBC_BATCH_INSERT_SIZE, 1000)
        .jdbc("jdbc:clickhouse://node1:8123/newdb", "t_java", props)

//      // Read data back from ClickHouse (kept for reference):
//      val readProps = new Properties()
//      readProps.setProperty("user", "default")
//      readProps.setProperty("password", "")
//      val readFrame: DataFrame = session.read
//        .option(JDBCOptions.JDBC_BATCH_FETCH_SIZE, 100)
//        .jdbc("jdbc:clickhouse://node1:8123/newdb", "t_java", readProps)
//      readFrame.show()

    } finally {
      // Always release Spark resources so the JVM can exit cleanly,
      // even when the JDBC write fails.
      session.stop()
    }
  }

}
