package com.fine.spark.connector.mysql.input

import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.serializer.SerializeFilter
import com.fine.spark.connector.base.beans.BaseConfig
import com.fine.spark.connector.base.worker.BaseWorker
import com.fine.spark.connector.mysql.beans.MysqlInputConfig
import org.apache.spark.sql.{DataFrameReader, SparkSession}

import java.util
import scala.collection.convert.ImplicitConversions._

/**
 * @author jayce
 * @date 2021/11/21 10:23 AM
 * @version 1.0
 */
class MysqlInputWorker extends BaseWorker {

  /**
   * Load a MySQL table through Spark's JDBC data source and register the
   * result as a temporary view, then invoke the post-process hook.
   *
   * @param config the input configuration; must be a [[MysqlInputConfig]]
   *               (a `ClassCastException` is thrown for any other subtype)
   * @param ss     active SparkSession used to build the JDBC reader
   */
  override def process(config: BaseConfig)(implicit ss: SparkSession): Unit = {
    val mysqlInputConfig: MysqlInputConfig = config.asInstanceOf[MysqlInputConfig]
    // Spark's JDBC source only accepts String-valued options, so non-string
    // config fields are filtered out before being passed as reader options.
    val jdbcOptions: util.HashMap[String, String] = filterValues(mysqlInputConfig)
    jdbcOptions.put("dbtable", mysqlInputConfig.srcTable)
    val reader: DataFrameReader = ss.sqlContext.read.format("jdbc").options(jdbcOptions)
    reader.load().createOrReplaceTempView(mysqlInputConfig.viewName)
    logger.info(s"inputs, load jdbc table '${mysqlInputConfig.srcTable}' to Spark table '${mysqlInputConfig.viewName}' success")
    this.afterProcess(mysqlInputConfig)
  }


  /**
   * Flatten a config bean into a String -> String map, because Spark's
   * `DataFrameReader.options` only supports string values. Fields whose
   * values are not strings are silently dropped.
   *
   * @param item the config bean to serialize
   * @return a mutable map containing only the bean's string-valued fields
   */
  def filterValues(item: BaseConfig): util.HashMap[String, String] = {
    // Round-trip through JSON to expose the bean's fields as a generic map.
    val json = JSON.toJSONString(item, new Array[SerializeFilter](0))
    val baseMap = JSON.parseObject(json, classOf[java.util.HashMap[String, Object]])
    val res = new util.HashMap[String, String]()
    // Iterate via the Java entry-set iterator rather than the deprecated
    // scala.collection.convert.ImplicitConversions wrappers.
    val entries = baseMap.entrySet().iterator()
    while (entries.hasNext) {
      val entry = entries.next()
      entry.getValue match {
        case str: String => res.put(entry.getKey, str)
        case _           => // non-string values are not valid Spark options
      }
    }
    res
  }
}
