//package spark.kafka
//
//import java.util.Properties
//
//import akka.actor.Actor
//import kafka.consumer._
//import kafka.message.MessageAndMetadata
//import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
//import org.apache.kafka.common.serialization.StringSerializer
//
//import scala.collection.mutable
//
//
///**
//	* 消费者，接收消息
//	*/
//class MyKafkaConsumer(topic: String) extends Actor {
//		var consumer: ConsumerConnector = _
//
//		/**
//			* 初始化的方法
//			*/
//		def init(): MyKafkaConsumer = {
//				val properties = new Properties()
//				properties.put("zookeeper.connect", "nn1.hadoop:2181,nn2.hadoop:2181,s1.hadoop:2181")
//				properties.put("group.id", "my_group_object")
//				properties.put("zookeeper.session.timeout.ms", "60000")
//				consumer = Consumer.create(new ConsumerConfig(properties))
//				this
//		}
//
//		override def act(): Unit = {
//				var topicConfig = new mutable.HashMap[String, Int]()
//				topicConfig += (topic -> 1)
//				val message: collection.Map[String, List[KafkaStream[Array[Byte], Array[Byte]]]] = consumer.createMessageStreams(topicConfig)
//				val kafkaStream: KafkaStream[Array[Byte], Array[Byte]] = message.get(topic).get(0)
//				val iter: ConsumerIterator[Array[Byte], Array[Byte]] = kafkaStream.iterator()
//				while (iter.hasNext()) {
//						val messages: MessageAndMetadata[Array[Byte], Array[Byte]] = iter.next()
//						val bytes: Array[Byte] = messages.message()
//						println(s"接收到的是：${new String(bytes)}")
//						Thread.sleep(1000)
//				}
//
//		}
//}
//
//object MyKafkaConsumer {
//		// 调用 init 方法
//		def apply(topic: String): MyKafkaConsumer = new MyKafkaConsumer(topic).init()
//}
//
//
///**
//	* 生产者，用于发送消息
//	*/
//class MyKafkaProduct(val topic: String) extends Actor {
//		var producer: KafkaProducer[String, String] = _
//
//		def init(): MyKafkaProduct = {
//				val properties = new Properties()
//				properties.put("bootstrap.servers", "nn1.hadoop:9092,nn2.hadoop:9092,s1.hadoop:9092")
//				properties.put("key.serializer", classOf[StringSerializer].getName())
//				properties.put("value.serializer", classOf[StringSerializer].getName())
//				//每条消息对应一个ProducerRecord 对象。以下两种发送消息，默认是异步的，会将消息缓存到消息缓冲区中，当消息在消息缓冲区中累计到一定数量后作为一个RecordBatch 再发送
//				//		public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) // 可以在回调函数中进行相应处理，
//				//      public Future<RecordMetadata> send(ProducerRecord<K, V> record)
//				producer = new KafkaProducer[String, String](properties)
//				this
//		}
//
//		override def act(): Unit = {
//				var num = 1
//				while (true) {
//						val stringMessage = new String(s"测试发送数字：${num}")
//						println(s"发送${num}完成")
//						producer.send(new ProducerRecord[String, String](this.topic, stringMessage))
//						num += 1
//						if (num > 10) num = 0
//						Thread.sleep(3000)
//				}
//		}
//}
//
//object MyKafkaProduct {
//		def apply(topic: String): MyKafkaProduct = new MyKafkaProduct(topic).init()
//}
//
///**
//	* 测试类
//	*/
//object TestKafkaObject {
//		def main(args: Array[String]): Unit = {
//				val topic = "zhangjiang_test_object"
//				val product = MyKafkaProduct(topic)
//				val consumer = MyKafkaConsumer(topic)
//				consumer.start()
//				product.start()
//		}
//}
//
//
//
//
