package com.example.spark.sql

import com.example.util.SparkUtil
import org.apache.spark.sql.types.{LongType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import java.util.concurrent.{ExecutorService, Executors, Future}
import scala.collection.mutable.ArrayBuffer


/**
 * @title: MultiExecutor
 * @projectName bigdata
 * @Version: 1.0
 * @description: Demonstrates speeding up Spark by running independent SQL queries on multiple threads.
 * @author leali
 * @date 2022/5/31 21:05
 */
object MultiExecutor {

  /**
   * Runs three independent Spark SQL queries concurrently on a fixed thread pool,
   * tags each result with a generated row-index column, and joins the results on
   * that index before displaying them.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.initSimpleSparkSession(appName = "MultiExecutor")
    import spark.implicits._

    /**
     * Appends a monotonically increasing `index` column (LongType) to `df`.
     * `zipWithIndex` assigns indices in the RDD's current row order, so the
     * index reflects partition ordering at the time of the call.
     */
    def addColumnIndex(df: DataFrame): DataFrame = spark.createDataFrame(
      // Pair each row with its position, then fold the position back into the row.
      df.rdd.zipWithIndex.map { case (row, index) => Row.fromSeq(row.toSeq :+ index) },
      // Original schema plus the new non-nullable index field.
      StructType(df.schema.fields :+ StructField("index", LongType, nullable = false))
    )

    // Register three small demo tables (same ids, differently-cased names).
    Seq((1, "Tom"), (2, "Jack"), (3, "Marry"), (4, "Daniel")).toDF("id", "name")
      .createOrReplaceTempView("info1")
    Seq((1, "tom"), (2, "jack"), (4, "daniel"), (3, "marry")).toDF("id", "low_name")
      .createOrReplaceTempView("info2")
    Seq((1, "TOM"), (3, "MARRY"), (4, "DANIEL"), (2, "JACK")).toDF("id", "up_name")
      .createOrReplaceTempView("info3")

    // One worker thread per table so the three queries run in parallel.
    val executorService: ExecutorService = Executors.newFixedThreadPool(3)
    try {
      val tables: List[String] = List("info1", "info2", "info3")
      // Submit one query per table. Each future yields the indexed frame together
      // with its row count as a tuple (the previous single-entry Map[DataFrame, Long]
      // abused a DataFrame as a hash key just to carry a pair).
      val futures: List[Future[(DataFrame, Long)]] = tables.map { table =>
        executorService.submit(() => {
          val frame: DataFrame = spark.sql(s"SELECT * FROM $table")
          // Simulates a long-running rule so the parallel execution is observable.
          Thread.sleep(50000)
          (addColumnIndex(frame), frame.count())
        })
      }
      // Block for all results, then join every frame on the shared index column.
      futures
        .map(_.get()._1)
        .reduce(_.join(_, Seq("index")))
        .drop("index")
        .show()
    } finally {
      // Release the pool even if a query or Future.get() fails.
      executorService.shutdown()
    }
  }
}
