package com.fine.spark.connector.base.worker

import com.alibaba.fastjson.JSON
import com.fine.spark.connector.base.beans.BaseConfig
import com.fine.spark.connector.base.utils.Logging
import com.fine.spark.connector.base.beans.input.BaseInputConfig
import com.fine.spark.connector.base.beans.output.BaseOutputConfig
import com.fine.spark.connector.base.beans.process.BaseProcessConfig
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import java.lang

/**
 * @author jayce
 * @date 2021/11/20 10:09 PM
 * @version 1.0
 */
trait BaseWorker extends Logging {
  /**
   * Executes this worker's processing step for the given configuration.
   *
   * @param bean configuration describing the step to run
   * @param ss   active SparkSession
   */
  def process(bean: BaseConfig)(implicit ss: SparkSession): Unit

  /**
   * Looks up a registered table/temp view by name and returns its rows as an RDD.
   *
   * @param tableName name of the table or temporary view
   * @param ss        active SparkSession
   * @return the RDD of rows backing the table
   */
  def getRDDByTable(tableName: String)(implicit ss: SparkSession): RDD[Row] = {
    ss.table(tableName).rdd
  }

  /**
   * Post-processing hook applied after a step runs: optionally repartitions,
   * caches and previews the step's output view.
   *
   * Input and process configs share identical handling (delegated to
   * [[applyViewOptions]]); output configs require no post-processing, and
   * unknown config types are ignored.
   *
   * @param process configuration of the step that just ran
   * @param ss      active SparkSession
   */
  protected def afterProcess(process: BaseConfig)(implicit ss: SparkSession): Unit = {
    process match {
      case inputConfig: BaseInputConfig =>
        // input处理情况
        applyViewOptions(inputConfig.getViewName, inputConfig.getPartitions,
          inputConfig.getCache, inputConfig.getShow)
      case _: BaseOutputConfig =>
      // output处理情况 — nothing to do
      case processConfig: BaseProcessConfig =>
        applyViewOptions(processConfig.getViewName, processConfig.getPartitions,
          processConfig.getCache, processConfig.getShow)
      case _ =>
    }
  }

  /**
   * Shared per-view post-processing for input/process steps.
   *
   * Null-safe with respect to the boxed Java config values: unboxing a null
   * `Integer`/`lang.Boolean` would otherwise throw a NullPointerException.
   *
   * @param tableName  name of the view to adjust (skipped if null or absent)
   * @param partitions target partition count; applied when non-null and > 0
   * @param cache      whether to cache the view
   * @param show       number of preview rows to print; shown when non-null and > 0
   */
  private def applyViewOptions(tableName: String,
                               partitions: Integer,
                               cache: lang.Boolean,
                               show: Integer)(implicit ss: SparkSession): Unit = {
    if (tableName != null && ss.catalog.tableExists(tableName)) {
      if (partitions != null && partitions > 0) {
        // Re-register the repartitioned frame so the new partitioning actually
        // takes effect for downstream readers. The original code assigned the
        // repartition result to a local var and then discarded it.
        ss.table(tableName).repartition(partitions).createOrReplaceTempView(tableName)
      }

      if (cache != null && cache) {
        doCache(tableName)
      }

      if (show != null && show > 0) {
        ss.table(tableName).show(show)
      }
    }
  }

  /**
   * Caches the named table via Spark SQL and logs the outcome.
   *
   * @param tableName table to cache
   * @param ss        active SparkSession
   */
  private def doCache(tableName: String)(implicit ss: SparkSession): Unit = {
    ss.sql(s"cache table $tableName")
    logger.info(s"cache table '$tableName' success.")
  }
}
