package com.bigdata.spark.util

import com.bigdata.spark.flume.MysqlPoolManager
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.{IntegerType, StringType}

/**
 * @author Gerry chan
 * @version 1.0
 * MySQL utility: batch-inserts the rows of a DataFrame into a MySQL table.
 * Reference: https://www.imooc.com/video/19728
 */
object MysqlUtil {
  /**
   * Batch-inserts every row of `df` into the MySQL table `tableName`.
   *
   * One JDBC connection is taken from the pool per partition; all rows of the
   * partition are written as a single batch inside one transaction, then the
   * statement and connection are released.
   *
   * Note: `tableName` is concatenated into the SQL text (JDBC cannot
   * parameterize identifiers) — it must come from trusted code, never from
   * user input.
   *
   * @param tableName target MySQL table; its column order must match `df.columns`
   * @param df        source DataFrame whose rows are inserted
   */
  def insert(tableName: String, df: DataFrame): Unit = {
    val cols = df.columns
    // Build "insert into <table> values(?, ?, ..., ?)" — one placeholder per column.
    val sql = cols.map(_ => "?").mkString(s"insert into $tableName values(", ", ", ")")

    // Capture column types on the driver; the array is serialized to executors.
    val columnDataTypes = df.schema.fields.map(_.dataType)

    df.foreachPartition { partitionRecords =>
      // One pooled connection per partition, released when the partition is done.
      val conn = MysqlPoolManager.mysqlManager.getConnection
      conn.setAutoCommit(false)
      val preparedStatement = conn.prepareStatement(sql)
      try {
        partitionRecords.foreach { record =>
          // Bind each column value to its 1-based JDBC placeholder by declared type.
          for (i <- 1 to cols.length) {
            columnDataTypes(i - 1) match {
              case _: StringType  => preparedStatement.setString(i, record.getAs[String](i - 1))
              case _: IntegerType => preparedStatement.setInt(i, record.getAs[Int](i - 1))
              // Fallback for any other Spark SQL type; the original code threw
              // MatchError here. setObject lets the JDBC driver convert.
              case _              => preparedStatement.setObject(i, record.getAs[AnyRef](i - 1))
            }
          }
          preparedStatement.addBatch()
        }
        // BUG FIX: execute/commit once per PARTITION. The original ran these four
        // statements inside the per-record foreach, so the connection was closed
        // after the first record and every later record failed.
        preparedStatement.executeBatch()
        conn.commit()
      } finally {
        // Always release JDBC resources, even if the batch fails mid-partition.
        preparedStatement.close()
        conn.close()
      }
    }
  }
}
