package akka.streams

import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.Future

/**
  * Created by shichen on 2017/6/26.
  *
  * References:
  *  - http://doc.akka.io/docs/akka-stream-kafka/current/producer.html
  *  - http://doc.akka.io/docs/akka-stream-kafka/current/consumer.html
  */
object AkkaStreamKafka extends App {
  import scala.concurrent.ExecutionContext.Implicits._

  // Akka remoting configuration: netty TCP transport bound on 0.0.0.0:2552.
  val conf =
    """
      |akka {
      |  actor {
      |    provider = "akka.remote.RemoteActorRefProvider"
      |  }
      |
      |  remote {
      |    enabled-transports = ["akka.remote.netty.tcp"]
      |    netty.tcp {
      |      hostname = "0.0.0.0"
      |      port = 2552
      |    }
      |  }
      |}
    """.stripMargin
  //println(conf)
  val config = ConfigFactory.parseString(conf)

  // Kafka bootstrap server address.
  val host = "10.1.30.122:9092"
  // Port 2552 is used because the default 2551 was already occupied by another process (QQ).
  implicit val system = ActorSystem("akka-stream-kafka", config)
  implicit val materializer = ActorMaterializer()

  // Shared producer settings: byte-array keys, string values.
  val producerSettings = ProducerSettings(system, new ByteArraySerializer,
    new StringSerializer)
    .withBootstrapServers(host)

  basicProducer()

  /**
    * Publishes messages in the middle of the stream processing — not as the
    * last step — using `Producer.flow`. Each `ProducerMessage.Message` carries
    * a `passThrough` value that travels with the message through the flow so
    * downstream stages can still see it after the send.
    */
  def produceWithFlow(): Unit = {
    val done = Source(2 to 8)
      .map { n =>
        val partition = 0
        ProducerMessage.Message(new ProducerRecord[Array[Byte], String](
          "test", partition, null, n.toString
        ), n) // n is the passThrough: it rides through the flow to the downstream stage
      }
      .via(Producer.flow(producerSettings))
      .map { result =>
        val record = result.message.record
        println(s"${record.topic()}/${record.partition()} ${result.offset}:${record.value()}" +
          s"(${result.message.passThrough})")
        result
      }
      .runWith(Sink.ignore)
    done.onComplete(_ => system.terminate())
  }

  /** Backward-compatible alias for the original (misspelled) method name. */
  @deprecated("use produceWithFlow instead", "2017-06-26")
  def produceWithFolw(): Unit = produceWithFlow()

  /*
  val consumerSettings = ConsumerSettings(system,new ByteArrayDeserializer,
    new StringDeserializer)
    .withBootstrapServers(host)
  // forward records from topic `test` to topic `test-0`,
  // committing the consumer offset only after the produce succeeds
  val done2 =
    Consumer.committableSource(consumerSettings,Subscriptions.topics("test"))
    .map { msg =>
      println(s"test -> test-0: $msg")
      ProducerMessage.Message(new ProducerRecord[Array[Byte],String](
        "test-0",msg.record.value()
      ),msg.committableOffset)
    }
    .via(Producer.flow(producerSettings))
    .mapAsync(producerSettings.parallelism) { result =>
      result.message.passThrough.commitScaladsl()
    }
    .runWith(Sink.ignore)
  */

  /** Writes the strings "1".."5" to topic `test`, then shuts the actor system down. */
  def basicProducer(): Unit = {
    val done = Source(1 to 5)
      .map(_.toString)
      .map { elem =>
        new ProducerRecord[Array[Byte], String]("test", elem)
      }
      .runWith(Producer.plainSink(producerSettings))
    done.onComplete(_ => system.terminate())
  }

  /**
    * Demonstrates sharing an externally created `KafkaProducer` with a stream.
    * The producer is closed once the stream completes (it is not closed
    * automatically when passed in explicitly, so skipping this leaks it).
    */
  def shareKafkaProducer(): Unit = {
    val settings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
      .withBootstrapServers(host)
    val kafkaProducer = settings.createKafkaProducer()
    val done = Source(1 to 4)
      .map(_.toString)
      .map { e =>
        new ProducerRecord[Array[Byte], String]("test", e)
      }
      .runWith(Producer.plainSink(settings, kafkaProducer))
    // Close the manually created producer when the stream finishes (fixes a resource leak).
    done.onComplete(_ => kafkaProducer.close())
  }

  /**
    * Consumes topic `test` starting from an externally stored offset
    * (loaded from `Db`) and saves each record back through `Db`.
    */
  def consumerExample(): Unit = {
    val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
      .withBootstrapServers(host)
      .withGroupId("group1")
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    val db = new Db
    db.loadOffset().foreach { fromOffset =>
      val partition = 0
      // manually assign an offset to a specific topic/partition
      val subscription = Subscriptions.assignmentWithOffset(new TopicPartition("test", partition) -> fromOffset)
      val done = Consumer.plainSource(consumerSettings, subscription)
        .mapAsync(1)(db.save)
        .runWith(Sink.ignore)
    }
  }
}

/**
  * Minimal in-memory stand-in for a database, used by the consumer example
  * to persist records and remember the last processed Kafka offset.
  */
class Db {
  // Last processed offset; AtomicLong so concurrent stream stages can update it safely.
  private val offset = new AtomicLong()

  /** Returns the offset from which consumption should resume. */
  def loadOffset(): Future[Long] = Future.successful(offset.get())

  /** "Persists" a consumed record (prints it) and remembers its offset. */
  def save(record: ConsumerRecord[Array[Byte], String]): Future[Done] = {
    println(s"db.save:${record.value()}")
    offset.set(record.offset())
    Future.successful(Done)
  }

  /** Simulates updating a row with the given payload. */
  def update(data: String): Future[Done] = {
    println(s"db.update:$data")
    Future.successful(Done)
  }
}

/** Toy asynchronous service used to demonstrate `mapAsync`-style side effects. */
class Rocket {
  /** Launches a rocket to the given destination; completes immediately. */
  def launch(destination: String): Future[Done] = {
    println(s"rocket launched to $destination")
    Future.successful(Done)
  }

  /** Backward-compatible alias for the original (misspelled) method name. */
  @deprecated("use launch instead", "2017-06-26")
  def lauch(destination: String): Future[Done] = launch(destination)
}
