package org.apache.spark.streaming.rabbitmq

import java.io.IOException
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import java.util.{Locale, Optional, List => JList}

import com.rabbitmq.client.{AMQP, Channel, Connection, ConnectionFactory, DefaultConsumer, Envelope}
import javax.annotation.concurrent.GuardedBy
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.streaming.LongOffset
import org.apache.spark.sql.sources.v2.reader.{InputPartition, InputPartitionReader}
import org.apache.spark.sql.sources.v2.reader.streaming.{MicroBatchReader, Offset}
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.types.UTF8String

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

/**
 * Spark Structured Streaming micro-batch source for RabbitMQ (AMQP).
 *
 * <pre>
 *
 * Created by zhenqin.
 * User: zhenqin
 * Date: 2022/12/1
 * Time: 16:04
 * Vendor: yiidata.com
 *
 * </pre>
 *
 * @author zhenqin
 */
/**
 * A [[MicroBatchReader]] that consumes messages from a RabbitMQ queue and
 * exposes them as micro-batches of rows shaped as
 * (consumerTag, exchange, routingKey, queue, receiveTimestamp, message).
 *
 * NOTE: the consumer is registered with autoAck = true, so every message is
 * acknowledged to the broker on delivery — before Spark commits the batch.
 * Buffered-but-uncommitted rows are therefore lost on driver failure
 * (at-most-once delivery).
 *
 * @param schema  the fixed read schema for the produced rows
 * @param options data source options: `host` (default "localhost"),
 *                `port` (default 5672), `queuename` (required),
 *                `virtualhost` (default "/"), `username`, `password`
 */
class AmqpMicroBatchReader(schema: StructType, options: Map[String, String])
  extends MicroBatchReader
    with Logging {

  assert(SparkSession.getActiveSession.isDefined)
  val spark = SparkSession.getActiveSession.get
  val numPartitions = spark.sparkContext.defaultParallelism

  private val host: String = options.getOrElse("host", "localhost")
  private val port: Int = options.getOrElse("port", "5672").toInt

  /**
   * RabbitMQ queue name. Required; fails fast when absent
   * (getOrElse's default is by-name, so the throw only fires when missing).
   */
  val queueName: String = options.getOrElse("queuename",
    throw new IllegalArgumentException("can not get amqp.queueName"))

  /**
   * RabbitMQ virtual host, defaults to "/".
   */
  val virtualHost: String = options.getOrElse("virtualhost", "/")

  /**
   * RabbitMQ user name; null lets the client library use its default.
   */
  val username: String = options.get("username").orNull

  /**
   * RabbitMQ password; null lets the client library use its default.
   */
  val password: String = options.get("password").orNull

  /**
   * TCP connection to the broker; created lazily by [[initialize]].
   */
  private var connection: Connection = null

  @GuardedBy("this")
  private var channel: Channel = null

  private var startOffset: Offset = _
  private var endOffset: Offset = _

  /**
   * All batches from `lastCommittedOffset + 1` to `currentOffset`, inclusive.
   * Stored in a ListBuffer to facilitate removing committed batches.
   */
  @GuardedBy("this")
  private val batches = new ListBuffer[(UTF8String, UTF8String, UTF8String, UTF8String, Long, UTF8String)]

  // 1-based offset of the newest message received so far (0 = none yet).
  @GuardedBy("this")
  private val currentOffset: AtomicLong = new AtomicLong(0)

  // Highest offset Spark has committed; everything up to it has been trimmed
  // from `batches`. -1 means nothing committed yet.
  @GuardedBy("this")
  private var lastOffsetCommitted: LongOffset = LongOffset(-1L)

  private val initialized: AtomicBoolean = new AtomicBoolean(false)

  /**
   * Opens the broker connection/channel and registers the asynchronous
   * consumer that buffers incoming messages. Idempotent.
   */
  private def initialize(): Unit = synchronized {
    if (connection != null && channel != null) {
      return
    }
    val factory = new ConnectionFactory
    factory.setHost(host)
    factory.setPort(port)
    factory.setUsername(username)
    factory.setPassword(password)
    factory.setVirtualHost(virtualHost)
    // Open the TCP connection to the RabbitMQ server and a channel on it.
    connection = factory.newConnection
    channel = connection.createChannel()

    val consumer = new DefaultConsumer(channel) {
      @throws[IOException]
      override def handleDelivery(consumerTag: String, envelope: Envelope, properties: AMQP.BasicProperties, body: Array[Byte]): Unit = {
        if (body.length != 0) {
          val message = new String(body, StandardCharsets.UTF_8)
          // Guard batches/currentOffset against concurrent reads from
          // planInputPartitions()/commit() on the driver thread.
          AmqpMicroBatchReader.this.synchronized {
            val newData = (
              UTF8String.fromString(consumerTag),
              UTF8String.fromString(envelope.getExchange),
              UTF8String.fromString(envelope.getRoutingKey),
              UTF8String.fromString(queueName),
              System.currentTimeMillis(),
              UTF8String.fromString(message)
            )
            currentOffset.incrementAndGet()
            batches.append(newData)
          }
        }
      }
    }
    // autoAck = true: every message is acknowledged to the broker as soon as
    // it is delivered — before Spark processes it — so delivery is
    // at-most-once. Switch to manual acks for at-least-once semantics.
    channel.basicConsume(queueName, true, consumer)
  }

  override def setOffsetRange(start: Optional[Offset], end: Optional[Offset]): Unit = synchronized {
    // Start consuming as early as possible so currentOffset (used for the
    // default end offset below) reflects messages that have already arrived.
    if (initialized.compareAndSet(false, true)) {
      initialize()
    }
    // Offsets are 1-based, so "nothing read yet" is -1 (matching
    // lastOffsetCommitted). Defaulting to 0 would skip the first message:
    // planInputPartitions() would slice starting at buffer index 1.
    startOffset = start.orElse(LongOffset(-1L))
    endOffset = end.orElse(LongOffset(currentOffset.get()))
  }

  override def getStartOffset(): Offset = {
    Option(startOffset).getOrElse(throw new IllegalStateException("start offset not set"))
  }

  override def getEndOffset(): Offset = {
    Option(endOffset).getOrElse(throw new IllegalStateException("end offset not set"))
  }

  /** Offsets are serialized as a plain long. */
  override def deserializeOffset(json: String): Offset = {
    LongOffset(json.toLong)
  }

  override def readSchema(): StructType = {
    this.schema
  }

  /**
   * Slices the buffered messages in (startOffset, endOffset] and distributes
   * them round-robin over `numPartitions` partitions.
   */
  override def planInputPartitions(): JList[InputPartition[InternalRow]] = {
    assert(startOffset != null && endOffset != null,
      "start offset and end offset should already be set before create read tasks.")

    // Ordinals are 1-based positions of the first message AFTER each offset.
    val startOrdinal = LongOffset.convert(startOffset).get.offset.toInt + 1
    val endOrdinal = LongOffset.convert(endOffset).get.offset.toInt + 1

    // Internal buffer only holds the batches after lastOffsetCommitted, so
    // translate stream ordinals into buffer indices before slicing. Convert
    // to an immutable List so the data shipped to partitions cannot be
    // mutated by the consumer thread afterwards.
    val rawList = synchronized {
      if (initialized.compareAndSet(false, true)) {
        initialize()
      }

      val sliceStart = startOrdinal - lastOffsetCommitted.offset.toInt - 1
      val sliceEnd = endOrdinal - lastOffsetCommitted.offset.toInt - 1
      batches.slice(sliceStart, sliceEnd).toList
    }

    // Round-robin assignment keeps the partitions balanced.
    val slices = Array.fill(numPartitions)(new ListBuffer[(UTF8String, UTF8String, UTF8String, UTF8String, Long, UTF8String)])
    rawList.zipWithIndex.foreach { case (r, idx) =>
      slices(idx % numPartitions).append(r)
    }

    (0 until numPartitions).map { i =>
      val slice: ListBuffer[(UTF8String, UTF8String, UTF8String, UTF8String, Long, UTF8String)] = slices(i)
      new InputPartition[InternalRow] {
        override def createPartitionReader(): InputPartitionReader[InternalRow] =
          new InputPartitionReader[InternalRow] {
            private var currentIdx = -1

            override def next(): Boolean = {
              currentIdx += 1
              currentIdx < slice.size
            }

            override def get(): InternalRow = {
              // Read the tuple once instead of six positional lookups.
              val row = slice(currentIdx)
              InternalRow(row._1, row._2, row._3, row._4, row._5, row._6)
            }

            override def close(): Unit = {}
          }
      }
    }.toList.asJava
  }

  /**
   * Drops every buffered message up to and including `end`; they will never
   * be replanned.
   */
  override def commit(end: Offset): Unit = synchronized {
    val newOffset = LongOffset.convert(end).getOrElse(
      sys.error(s"AmqpSourceStream.commit() received an offset ($end) that did not " +
        s"originate with an instance of this class")
    )

    val offsetDiff = (newOffset.offset - lastOffsetCommitted.offset).toInt

    if (offsetDiff < 0) {
      sys.error(s"Offsets committed out of order: $lastOffsetCommitted followed by $end")
    }

    batches.trimStart(offsetDiff)
    lastOffsetCommitted = newOffset
  }

  /**
   * Closes the broker connection; closing the connection also closes its
   * channels and stops the asynchronous consumer.
   */
  override def stop(): Unit = synchronized {
    if (connection != null) {
      try {
        connection.close()
      } catch {
        // Best-effort shutdown: log instead of failing query teardown.
        case e: IOException => logWarning("Error closing RabbitMQ connection", e)
      }
      channel = null
      connection = null
    }
  }
}
