package cn.lecosa.spark.mysql

import java.sql.{ DriverManager, PreparedStatement, Connection }
import org.apache.spark.{ SparkContext, SparkConf }

import org.apache.spark.sql.{ DataFrame, Row, SQLContext, SparkSession }
import org.apache.spark.{ SparkConf, SparkContext }
import java.util.Properties

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{ LongType, StringType, StructField, StructType }

import java.sql.Timestamp

import org.apache.spark.sql.{ SaveMode, SQLContext }
import org.apache.spark.{ SparkContext, SparkConf }

/**
 * One parsed record of the 12306 ticket file; fields appear in the same
 * order as the "----"-separated columns of the input line.
 *
 * @param account   login account
 * @param user_id   user identifier
 * @param user_name registered user name
 * @param idcard    national ID-card number
 * @param nick_name display nickname
 * @param fphone    phone number
 * @param email     e-mail address
 */
case class Train(
    account: String,
    user_id: String,
    user_name: String,
    idcard: String,
    nick_name: String,
    fphone: String,
    email: String)

/**
 * Reads the "----"-separated 12306 ticket dump from HDFS, converts it to a
 * DataFrame of [[Train]] rows, prints it, and appends it to the MySQL table
 * `test.ticket` through JDBC.
 */
object DataFrameSql {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[2]")
    // ----------------------
    // spark.sql.autoBroadcastJoinThreshold: max table size eligible for a
    //   broadcast join (default 10M); -1 disables broadcasting.
    // spark.sql.codegen: pre-compile SQL into Java bytecode; pays off for
    //   long-running or frequently repeated queries.
    // spark.sql.inMemoryColumnarStorage.batchSize: rows processed per batch —
    //   raise with care, risk of OOM.
    // spark.sql.inMemoryColumnarStorage.compressed: whether the in-memory
    //   columnar store is compressed.
    // ----------------------
    conf.set("spark.sql.shuffle.partitions", "20") // default is 200 partitions
    conf.setAppName("dataframe test")

    val sc = new SparkContext(conf)
    try {
      // One SQLContext is enough (the original created two identical ones).
      val sqlContext = new SQLContext(sc)
      import sqlContext.implicits._

      // Each input line is "----"-separated: account, user_id, user_name,
      // idcard, nick_name, fphone, email.
      val lines = sc.textFile("hdfs://park01:9000/home/12306.txt")
        .map(_.split("----"))

      val res = lines
        .map(t => Train(t(0), t(1), t(2), t(3), t(4), t(5), t(6)))
        .toDF()

      res.show()

      // DataFrameWriter.jdbc returns Unit, so its result is not bound to a
      // val. The target table may not exist yet — it is created on demand;
      // SaveMode.Append keeps any existing rows.
      res.write
        .mode(SaveMode.Append)
        .jdbc("jdbc:mysql://park01:3306/test", "ticket", JdbcUtil.getProp())

      res.printSchema()
    } finally {
      // Always release the SparkContext, even if the job fails mid-way.
      sc.stop()
    }
  }
}