package com.eurlanda.spark.cassandra

import com.datastax.spark.connector.cql.CassandraConnectorConf
import com.datastax.spark.connector.rdd.ReadConf
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.cassandra._

/**
  * Created by zhudebin on 2017/4/6.
  */
object DFDemo {

  /**
    * Demonstrates reading Cassandra tables into Spark Datasets with per-cluster
    * and per-read input-split-size configuration
    * (`spark.cassandra.input.split.size_in_mb`).
    *
    * Requires a reachable Cassandra instance for the default cluster and one
    * named "ClusterOne" — `load()` resolves the table schema eagerly.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[4]").setAppName("test cassandra")

    val spark = SparkSession.builder().config(conf).getOrCreate()

    // Scope a 32 MB input split size to keyspace "ks1" on cluster "ClusterOne",
    // and 128 MB to keyspace "test" on the default cluster. Reads below pick up
    // whichever scope matches their (cluster, keyspace) pair.
    spark.setCassandraConf("ClusterOne", "ks1", ReadConf.SplitSizeInMBParam.option(32))
    spark.setCassandraConf("default", "test", ReadConf.SplitSizeInMBParam.option(128))

    // NOTE: the original `spark.sparkContext.getConf.set("", "")` was removed:
    // SparkContext.getConf returns a *copy* of the configuration, so mutating it
    // has no effect — and an empty key/value pair is meaningless anyway.

    try {
      // Uses the default cluster: spark.cassandra.input.split.size_in_mb = 128.
      val df = spark
        .read
        .format("org.apache.spark.sql.cassandra")
        .options(Map("table" -> "words", "keyspace" -> "test"))
        .load()

      // Targets "ClusterOne": split size of 32 MB applies.
      val otherdf = spark
        .read
        .format("org.apache.spark.sql.cassandra")
        .options(Map("table" -> "words", "keyspace" -> "test", "cluster" -> "ClusterOne"))
        .load()

      // Per-read override: split size of 48 MB wins over the cluster-level setting.
      // (The dangling placeholder chain `.select("").alias("").filter("")` was
      // removed — empty column/filter expressions fail analysis at runtime.)
      val lastdf = spark
        .read
        .format("org.apache.spark.sql.cassandra")
        .options(Map(
          "table" -> "words",
          "keyspace" -> "test",
          "cluster" -> "ClusterOne",
          "spark.cassandra.input.split.size_in_mb" -> "48"
        ))
        .load()

      // Touch each Dataset so the demo actually exercises the connector
      // (printSchema is cheap: it needs only the resolved table metadata).
      Seq(df, otherdf, lastdf).foreach(_.printSchema())
    } finally {
      spark.stop() // always release the local Spark context
    }
  }

}
