package org.apache.spark.streaming.rabbitmq

import java.io.IOException
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import java.util
import java.util.{Locale, Optional, List => JList}
import java.util.concurrent.atomic.AtomicLong

import com.rabbitmq.client.{AMQP, Channel, Connection, ConnectionFactory, DefaultConsumer, Envelope}
import javax.annotation.concurrent.GuardedBy
import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.streaming.{ContinuousRecordEndpoint, ContinuousRecordPartitionOffset, GetRecord}
import org.apache.spark.sql.sources.v2.reader.{InputPartition, InputPartitionReader}
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousInputPartitionReader, ContinuousReader, Offset, PartitionOffset}
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.RpcUtils

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
/**
 *
 * spark amqp ContinuousReader
 *
 * <pre>
 *
 * Created by zhenqin.
 * User: zhenqin
 * Date: 2021/7/25
 * Time: 19:18
 * Vendor: yiidata.com
 *
 * </pre>
 *
 * @author zhenqin
 */
class AmqpContinuousReader(schema: StructType, options: Map[String, String])
  extends ContinuousReader
  with Logging {


  assert(SparkSession.getActiveSession.isDefined)
  val spark = SparkSession.getActiveSession.get

  /** One in-memory bucket (and one input partition) per default-parallelism slot. */
  val numPartitions: Int = spark.sparkContext.defaultParallelism


  /** When false, executor readers project each record down to the message body only. */
  private def includeTimestamp: Boolean = options.getOrElse("includeTimestamp", "false").toBoolean


  private val host: String = options.getOrElse("host", "localhost")
  private val port: Int = options.getOrElse("port", "5672").toInt


  /**
   * rabbitmq queue Name (mandatory option).
   */
  val queueName: String = options.getOrElse("queuename", throw new IllegalArgumentException("can not get amqp.queueName"))

  /**
   * rabbitmq virtualHost, defaults to "/"
   */
  val virtualHost: String = options.getOrElse("virtualhost", "/")


  /**
   * rabbitmq user; null means "keep the client-library default"
   */
  val username: String = options.get("username").orNull


  /**
   * rabbitmq password; null means "keep the client-library default"
   */
  val password: String = options.get("password").orNull

  /**
   * Live connection to the broker. Owned by this reader and closed in [[stop]].
   */
  @GuardedBy("this")
  private var connection: Connection = null


  @GuardedBy("this")
  private var channel: Channel = null


  /**
   * Per-partition buffers of delivered records, in the shape
   * (consumerTag, exchange, routingKey, queue, receiveTimeMillis, body).
   */
  @GuardedBy("this")
  private val buckets = Seq.fill(numPartitions)(new ListBuffer[(UTF8String, UTF8String, UTF8String, UTF8String, Long, UTF8String)])


  private var startOffset: AmqpOffset = _

  /** Total number of records delivered so far; drives the round-robin bucket choice. */
  @GuardedBy("this")
  private val currentOffset: AtomicLong = new AtomicLong(0)


  /** Driver-side endpoint that executor readers poll (via GetRecord) for buffered rows. */
  private val recordEndpoint = new ContinuousRecordEndpoint(buckets, this)
  @volatile private var endpointRef: RpcEndpointRef = _


  /**
   * Opens the RabbitMQ connection and channel, then registers an asynchronous
   * consumer that appends every non-empty delivery round-robin into [[buckets]].
   *
   * Idempotence is the caller's responsibility (see [[planInputPartitions]]).
   */
  private def initialize(): Unit = synchronized {
    val factory = new ConnectionFactory
    factory.setHost(host)
    factory.setPort(port)
    // Only override credentials when they were actually supplied; passing null
    // would clobber the client library's defaults.
    Option(username).foreach(factory.setUsername)
    Option(password).foreach(factory.setPassword)
    factory.setVirtualHost(virtualHost)
    // Open the TCP connection to the RabbitMQ server. Assign the FIELD (not a
    // shadowing local) so that stop() can actually close it later.
    connection = factory.newConnection
    // Create a channel on that connection.
    channel = connection.createChannel()

    val consumer = new DefaultConsumer(channel) {
      @throws[IOException]
      override def handleDelivery(consumerTag: String, envelope: Envelope, properties: AMQP.BasicProperties, body: Array[Byte]): Unit = {
        if(body.length != 0) {
          val message = new String(body, StandardCharsets.UTF_8)
          // The buckets and currentOffset are guarded by the reader's monitor,
          // which is also the lock shared with recordEndpoint.
          AmqpContinuousReader.this.synchronized {
            val newData = (
              UTF8String.fromString(consumerTag),
              UTF8String.fromString(envelope.getExchange),
              UTF8String.fromString(envelope.getRoutingKey),
              UTF8String.fromString(queueName),
              System.currentTimeMillis(),
              UTF8String.fromString(message)
            )
            currentOffset.incrementAndGet()
            val index = currentOffset.get() % numPartitions
            buckets(index.toInt) += newData
          }
        }
      }
    }

    // Bind the consumer to the queue. autoAck = true acknowledges each message
    // as soon as it is delivered, so this source is at-most-once: a crash
    // between delivery and commit loses the message.
    channel.basicConsume(queueName, true, consumer)
  }

  /**
   * Combines the per-partition offsets reported by the executors into a single
   * [[AmqpOffset]], ordered by partition id.
   */
  override def mergeOffsets(offsets: Array[PartitionOffset]): Offset = {
    assert(offsets.length == numPartitions)
    val offs = offsets
      .map(_.asInstanceOf[ContinuousRecordPartitionOffset])
      .sortBy(_.partitionId)
      .map(_.offset)
      .toList
    AmqpOffset(offs)
  }

  /**
   * Inverse of [[AmqpOffset.json]], which serializes as e.g. "[0, 3, 7]".
   * Tolerates the empty list "[]".
   */
  override def deserializeOffset(json: String): Offset = {
    val l = json.substring(1, json.length - 1)
      .split(",")
      .map(_.trim())
      .filter(_.nonEmpty) // "[]" -> empty list instead of "".toInt blowing up
      .map(_.toInt)
      .toList
    AmqpOffset(l)
  }

  /** Records the restart point; an absent offset means "start every partition at 0". */
  override def setStartOffset(offset: Optional[Offset]): Unit = {
    this.startOffset = offset
      .orElse(AmqpOffset(List.fill(numPartitions)(0)))
      .asInstanceOf[AmqpOffset]
    recordEndpoint.setStartOffsets(startOffset.offsets)
  }

  override def getStartOffset: Offset = {
    Option(startOffset).getOrElse(throw new IllegalStateException("start offset not set"))
  }

  /**
   * Drops every buffered record up to (and excluding) the committed offset of
   * each partition, then advances the start offset to the committed one.
   */
  override def commit(end: Offset): Unit = {
    val endOffset = end match {
      case off: AmqpOffset => off
      case _ => throw new IllegalArgumentException(s"AmqpContinuousReader.commit()" +
        s"received an offset ($end) that did not originate with an instance of this class")
    }

    endOffset.offsets.zipWithIndex.foreach {
      case (offset, partition) =>
        // The highest committable offset is start + number of buffered records.
        val max = startOffset.offsets(partition) + buckets(partition).size
        if (offset > max) {
          throw new IllegalStateException("Invalid offset " + offset + " to commit" +
            " for partition " + partition + ". Max valid offset: " + max)
        }
        val n = offset - startOffset.offsets(partition)
        buckets(partition).trimStart(n)
    }
    startOffset = endOffset
    recordEndpoint.setStartOffsets(startOffset.offsets)
  }

  /**
   * Releases driver-side resources: the record endpoint and the broker
   * connection (closing the connection also closes its channels and stops the
   * consumer callback thread).
   */
  override def stop(): Unit = synchronized {
    if (endpointRef != null) {
      // The endpoint is registered under a fresh UUID name on every planning
      // round; unregister it so restarts do not leak endpoints.
      recordEndpoint.rpcEnv.stop(endpointRef)
      endpointRef = null
    }
    if (connection != null) {
      try {
        connection.close()
      } catch {
        case e: IOException => logWarning("Failed to close RabbitMQ connection", e)
      }
      connection = null
      channel = null
    }
  }

  override def readSchema(): StructType = this.schema


  /**
   * Registers the driver endpoint and produces one [[AmqpContinuousInputPartition]]
   * per partition, each starting at the current start offset.
   */
  override def planInputPartitions(): JList[InputPartition[InternalRow]] = {
    // Start consuming lazily on first planning; initialize() had no other call
    // site (it is private), so without this the source never received data.
    synchronized {
      if (channel == null) initialize()
    }

    val endpointName = s"AmqpContinuousReaderEndpoint-${java.util.UUID.randomUUID()}"
    endpointRef = recordEndpoint.rpcEnv.setupEndpoint(endpointName, recordEndpoint)

    val offsets = startOffset match {
      case off: AmqpOffset => off.offsets
      case off =>
        throw new IllegalArgumentException(
          s"invalid offset type ${off.getClass} for AmqpContinuousReader")
    }

    if (offsets.size != numPartitions) {
      throw new IllegalArgumentException(
        s"The previous run contained ${offsets.size} partitions, but" +
          s" $numPartitions partitions are currently configured. The numPartitions option" +
          " cannot be changed.")
    }

    startOffset.offsets.zipWithIndex.map {
      case (offset, i) =>
        AmqpContinuousInputPartition(
          schema, endpointName, i, offset, includeTimestamp): InputPartition[InternalRow]
    }.asJava
  }
}


/**
 * Continuous AMQP (RabbitMQ) input partition.
 */
case class AmqpContinuousInputPartition(schema: StructType,
                                        driverEndpointName: String,
                                        partitionId: Int,
                                        startOffset: Int,
                                        includeTimestamp: Boolean)
  extends InputPartition[InternalRow] {

  /** Builds the executor-side reader that polls the driver endpoint for rows. */
  override def createPartitionReader(): InputPartitionReader[InternalRow] = {
    new AmqpContinuousInputPartitionReader(
      schema = schema,
      driverEndpointName = driverEndpointName,
      partitionId = partitionId,
      startOffset = startOffset,
      includeTimestamp = includeTimestamp)
  }
}

/**
 * Continuous AMQP (RabbitMQ) input partition reader.
 *
 * Polls the driver endpoint for new records.
 */
class AmqpContinuousInputPartitionReader(
                                          schema: StructType,
                                          driverEndpointName: String,
                                          partitionId: Int,
                                          startOffset: Int,
                                          includeTimestamp: Boolean)
  extends ContinuousInputPartitionReader[InternalRow] {

  // RPC handle to the driver-side ContinuousRecordEndpoint.
  private val endpoint = RpcUtils.makeDriverRef(
    driverEndpointName,
    SparkEnv.get.conf,
    SparkEnv.get.rpcEnv)

  private var currentOffset = startOffset
  private var current: Option[InternalRow] = None

  /**
   * Blocks (polling the driver every 100ms) until the next record is
   * available. Returns false only when the task is being interrupted.
   */
  override def next(): Boolean = {
    try {
      current = getRecord
      while (current.isEmpty) {
        Thread.sleep(100)
        current = getRecord
      }
      currentOffset += 1
      true
    } catch {
      case _: InterruptedException =>
        // Someone's trying to end the task; just let them.
        false
    }
  }

  /** The record fetched by the last successful next(). */
  override def get(): InternalRow = current.get

  override def close(): Unit = {}

  override def getOffset: PartitionOffset =
    ContinuousRecordPartitionOffset(partitionId, currentOffset)

  /**
   * Asks the driver for the record at the current offset. The driver wraps the
   * buffered tuple in a single-field InternalRow; when includeTimestamp is
   * false we unwrap it and keep only the first tuple field.
   */
  private def getRecord: Option[InternalRow] = {
    val reply = endpoint.askSync[Option[InternalRow]](
      GetRecord(ContinuousRecordPartitionOffset(partitionId, currentOffset)))
    reply.map { rec =>
      if (includeTimestamp) {
        rec
      } else {
        val tuple = rec.get(0, schema)
          .asInstanceOf[(UTF8String, UTF8String, UTF8String, UTF8String, Long, UTF8String)]
        InternalRow(tuple._1)
      }
    }
  }
}

/**
 * Offset for the AMQP source: one record count per partition.
 * Serialized as e.g. "[0, 3, 7]" — byte-identical to java.util.List#toString,
 * which is what AmqpContinuousReader.deserializeOffset parses.
 */
case class AmqpOffset(offsets: List[Int]) extends Offset {
  override def json: String = offsets.mkString("[", ", ", "]")

  def getOffsets(): List[Int] = offsets
}