package com.dataworker.spark.sql.kyuubi

import com.dataworker.spark.sql.DataworkerSQLConf
import org.apache.spark.sql.catalyst.SQLConfHelper
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.planning.ScanOperation
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.{DataWorkerSQLException, SparkSession, Strategy}

/**
 * A planning [[Strategy]] that never produces a physical plan (always returns `Nil`)
 * but fails fast with a [[DataWorkerSQLException]] when a query would scan a
 * partitioned Hive table in a forbidden way:
 *
 *  1. If [[DataworkerSQLConf.SCAN_ALL_HIVE_PARTITION_DISABLED]] is set and partition
 *     pruning matched zero partitions (i.e. no partition filter was given), the scan
 *     is rejected outright.
 *  2. If the number of partitions to scan (pruned, or the table's full partition
 *     count when no pruning information is available) exceeds
 *     [[DataworkerSQLConf.SCAN_MAX_HIVEPARTITION]], the scan is rejected.
 *
 * @param session the active [[SparkSession]], used to list partitions from the
 *                external catalog when the plan carries no pruning information
 * @author melin 2021/11/19 12:02 下午
 */
case class LimitHivePartitionStrategy (session: SparkSession)
  extends Strategy with SQLConfHelper {

  override def apply(plan: LogicalPlan): Seq[SparkPlan] = {
    // Match the scan once instead of twice; both checks apply to the same relation.
    plan match {
      case ScanOperation(_, _, relation: HiveTableRelation) if relation.isPartitioned =>
        assertPartitionFilterPresent(relation)
        assertPartitionCountWithinLimit(relation)
      case _ => // not a partitioned Hive scan — nothing to validate
    }
    // This strategy only validates; it never plans anything itself.
    Nil
  }

  /**
   * Rejects the scan when full-partition scanning is disabled and pruning matched
   * zero partitions (no partition filter condition was supplied).
   */
  private def assertPartitionFilterPresent(relation: HiveTableRelation): Unit = {
    val fullScanDisabled = conf.getConf(DataworkerSQLConf.SCAN_ALL_HIVE_PARTITION_DISABLED)
    // `prunedPartitions` is None when no pruning info exists; only an explicit
    // empty pruning result counts as a forbidden full scan here (matches the
    // original `case Some(p) if p.size == 0` behavior).
    if (fullScanDisabled && relation.prunedPartitions.exists(_.isEmpty)) {
      val fullName = relation.tableMeta.qualifiedName
      val partCols = relation.partitionCols.map(_.name).mkString(",")
      throw new DataWorkerSQLException(s"${fullName} 为分区表, 禁止全表扫描分区表，请给添加分区过滤条件，分区字段: ${partCols}")
    }
  }

  /**
   * Rejects the scan when the partition count exceeds the configured maximum.
   * Uses the pruned partition count when available; otherwise lists the table's
   * partition names from the external catalog.
   */
  private def assertPartitionCountWithinLimit(relation: HiveTableRelation): Unit = {
    val maxHivePartition = conf.getConf(DataworkerSQLConf.SCAN_MAX_HIVEPARTITION)
    relation.prunedPartitions match {
      case Some(prunedPartitions) =>
        if (prunedPartitions.size > maxHivePartition) {
          throw new DataWorkerSQLException(
            s"""
               |SQL job scan hive partition: ${prunedPartitions.size}
               |exceed restrict of hive scan maxPartition $maxHivePartition
               |You should optimize your SQL logical according partition structure
               |or shorten query scope such as p_date, detail as below:
               |Table: ${relation.tableMeta.qualifiedName}
               |Partition Structure: ${relation.partitionCols.map(_.name).mkString(" -> ")}
               |""".stripMargin)
        }
      case _ =>
        // No pruning info at all — count every partition the table has.
        val totalPartitions = session
          .sessionState.catalog.externalCatalog.listPartitionNames(
          relation.tableMeta.database, relation.tableMeta.identifier.table)
        if (totalPartitions.size > maxHivePartition) {
          throw new DataWorkerSQLException(
            s"""
               |Your SQL job scan a whole huge table without any partition filter,
               |You should optimize your SQL logical according partition structure
               |or shorten query scope such as p_date, detail as below:
               |Table: ${relation.tableMeta.qualifiedName}
               |Partition Structure: ${relation.partitionCols.map(_.name).mkString(" -> ")}
               |""".stripMargin)
        }
    }
  }
}
