package cn.lecosa.spark.mysql
import java.util.Properties
import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.jdbc.{ JdbcDialects, JdbcType, JdbcDialect }
import org.apache.spark.sql.types._
import org.apache.spark.{ SparkContext, SparkConf }
import java.util.Properties
import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.jdbc.{ JdbcDialects, JdbcType, JdbcDialect }
import org.apache.spark.sql.types._
import org.apache.spark.{ SparkContext, SparkConf }
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.SaveMode
/**
 *
 * The Oracle JDBC driver jar must be placed on the Spark classpath.
 *
 */
/**
 * End-to-end Spark job: reads a table from Oracle over JDBC, transforms it
 * together with Hive data, persists the result into Hive, and writes the
 * final result set back to Oracle.
 *
 * Requires the Oracle JDBC driver jar on the Spark classpath.
 */
object Df2Oracle {

  def main(args: Array[String]) {
    // Create the SparkContext for this job.
    val sc = createSparkContext
    // HiveContext serves both the Hive queries and the JDBC (Oracle) reads.
    val hiveContext = new HiveContext(sc)

    // Lazily load the Oracle table as a DataFrame (nothing is fetched yet).
    val jdbcDF = hiveContext.read.options(jdbcMap).format("jdbc").load
    // Switch to the `test` database in Hive.
    hiveContext.sql("use test")

    // Register a temp table so subsequent SQL can operate on the Oracle data.
    // Mind the table size to avoid OOM: pulling large volumes straight from
    // Oracle makes cluster computation time grow noticeably.
    jdbcDF.registerTempTable("temp_table1")

    val noTotalId = hiveContext.sql("some sql")
    //    println("NO_TOTAL_ID_TMP Total count" + noTotalId.count())
    // Register temp_table2 for use by the computation below.
    noTotalId.registerTempTable("temp_table2")
    // Not yet optimized; partitioning / bucketing strategies may be added later.
    val results = hiveContext.sql(" some sql")
    // Drop temp tables registered to Spark
    hiveContext.dropTempTable("temp_table1")
    hiveContext.dropTempTable("temp_table2")

    results.registerTempTable("resultDF")
    // Persist results into Hive.
    hiveContext.sql("insert into ods_incom_biaoma select * from resultDF")

    // Reading the results back from Hive keeps the final write efficient.
    val df2Oracle = hiveContext.sql("select * from ods_incom_biaoma")

    // Register the dialect so Spark maps Catalyst types to Oracle column types.
    JdbcDialects.registerDialect(OracleDialect)

    val connectProperties = new Properties()
    connectProperties.put("user", "user")
    connectProperties.put("password", "password")
    // Loading the class registers the driver with java.sql.DriverManager;
    // instantiating it via newInstance() was unnecessary (and is deprecated).
    Class.forName("oracle.jdbc.driver.OracleDriver")

    // Write the computed results back to Oracle.
    // Note: the target table must already exist.
    // FIX: this previously wrote `jdbcDF` (the raw source data) instead of the
    // computed `df2Oracle` results, as the saveTable call below intended.
    //    JdbcUtils.saveTable(df2Oracle, oracleDriverUrl, "ods_incom_biaoma", connectProperties)
    df2Oracle.write.mode(SaveMode.Overwrite).jdbc("jdbc:oracle:thin:@10.20.8.94:1521/orcl", "zfs_test", connectProperties)

    // NOTE(review): on Spark 1.5 both Append and Overwrite reportedly behave as
    // Overwrite under the hood, hence the .jdbc(...) write path is used here.
    sc.stop()
  }

  // JDBC connection settings for the Oracle source table.
  // NOTE(review): credentials are hard-coded placeholders — move to config.
  val oracleDriverUrl = "oracleUrl"
  val tableName = "aTable"
  val jdbcMap = Map(
    "url" -> oracleDriverUrl,
    "user" -> "user",
    "password" -> "password",
    "dbtable" -> tableName,
    "driver" -> "oracle.jdbc.driver.OracleDriver")

  /** Builds the SparkContext with this job's application name and master URL. */
  def createSparkContext: SparkContext = {
    val conf = new SparkConf().setAppName("Data Integration checking and Computing").setMaster("spark://Master:7077")
    // SparkConf tuning knobs (currently disabled):
    //conf.set("spark.sql.autoBroadcastJoinThreshold", "50M")
    /* spark.sql.codegen: pre-compile SQL to Java bytecode; helps long-running or frequent queries. */
    //conf.set("spark.sql.codegen", "true")
    /* spark.sql.inMemoryColumnarStorage.batchSize: rows processed per batch — beware of OOM. */
    //conf.set("spark.sql.inMemoryColumnarStorage.batchSize", "1000")
    /* spark.sql.inMemoryColumnarStorage.compressed: compress the in-memory columnar store. */
    //conf.set("spark.sql.inMemoryColumnarStorage.compressed", "true")
    new SparkContext(conf)
  }

  /** Custom JdbcDialect mapping Catalyst types to Oracle column types. */
  val OracleDialect = new JdbcDialect {
    override def canHandle(url: String): Boolean = url.startsWith("jdbc:oracle") || url.contains("oracle")
    // getJDBCType is consulted when writing a DataFrame to a JDBC table.
    override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
      case StringType    => Some(JdbcType("VARCHAR2(200)", java.sql.Types.VARCHAR))
      case BooleanType   => Some(JdbcType("NUMBER(1)", java.sql.Types.NUMERIC))
      case IntegerType   => Some(JdbcType("NUMBER(16)", java.sql.Types.NUMERIC))
      case LongType      => Some(JdbcType("NUMBER(16)", java.sql.Types.NUMERIC))
      case DoubleType    => Some(JdbcType("NUMBER(16,4)", java.sql.Types.NUMERIC))
      case FloatType     => Some(JdbcType("NUMBER(16,4)", java.sql.Types.NUMERIC))
      case ShortType     => Some(JdbcType("NUMBER(5)", java.sql.Types.NUMERIC))
      case ByteType      => Some(JdbcType("NUMBER(3)", java.sql.Types.NUMERIC))
      case BinaryType    => Some(JdbcType("BLOB", java.sql.Types.BLOB))
      case TimestampType => Some(JdbcType("DATE", java.sql.Types.DATE))
      case DateType      => Some(JdbcType("DATE", java.sql.Types.DATE))
      //        case DecimalType.Fixed(precision, scale) => Some(JdbcType("NUMBER(" + precision + "," + scale + ")", java.sql.Types.NUMERIC))
      //      case DecimalType.Unlimited => Some(JdbcType("NUMBER(38,4)", java.sql.Types.NUMERIC))
      case _             => None
    }
  }

}
