package com.nncsys.datastream

import com.nncsys.avro.MixStream
import com.nncsys.util.AvroUtil
import org.apache.commons.io.FileUtils
import org.apache.flink.api.common.serialization.{AbstractDeserializationSchema, SimpleStringSchema, TypeInformationSerializationSchema}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.AvroUtils
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer, FlinkKafkaProducer011, KafkaDeserializationSchema}
import org.apache.flink.streaming.util.serialization.{KeyedSerializationSchema, TypeInformationKeyValueSerializationSchema}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import java.io.{File, IOException}
import java.util.Properties
import java.nio.ByteBuffer
import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecordBuilder
import org.apache.flink.api.common.functions.CoGroupFunction
import org.apache.flink.formats.avro.{AvroDeserializationSchema, AvroSerializationSchema}
import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema
import org.apache.flink.streaming.api.windowing.assigners.{ProcessingTimeSessionWindows, TumblingEventTimeWindows}
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic
import org.apache.flink.streaming.connectors.kafka.internals.KeyedSerializationSchemaWrapper
import org.apache.flink.util.Collector

import java.lang
import java.text.DecimalFormat
import java.util.concurrent.TimeUnit
import scala.collection.convert.ImplicitConversions.`iterable AsScalaIterable`
import scala.util.Try
//import org.apache.flink.formats.avro.registry.confluent.{ConfluentRegistryAvroDeserializationSchema, ConfluentRegistryAvroSerializationSchema}

/**
 * Created with IntelliJ IDEA
 * <p>Description: joins two keyed Kafka byte streams and emits Avro MixStream records.</p>
 *
 * @author micheal
 * @see
 * @since 2020/04/01 0001 15:13
 */
object DataStreamKafka {


  //自定义一个类，从Kafka中读取键值对的数据
  /**
   * Kafka deserialization schema that turns raw Kafka records into
   * (key, value) tuples of (String, Array[Byte]).
   *
   * The key bytes are decoded as UTF-8 text; the value bytes are passed
   * through untouched. Kafka permits null keys and values, so both are
   * guarded with Option to avoid a NullPointerException (the previous
   * behavior on a null key).
   */
  class MyKafkaReader extends KafkaDeserializationSchema[(String, Array[Byte])] {

    /** The stream never self-terminates based on record content. */
    override def isEndOfStream(nextElement: (String, Array[Byte])): Boolean = false

    /**
     * Converts one consumer record into a (key, value) pair.
     * A null key maps to the empty string; a null value maps to an empty
     * byte array, so downstream operators never see nulls.
     */
    override def deserialize(record: ConsumerRecord[Array[Byte], Array[Byte]]): (String, Array[Byte]) = {
      val key = Option(record.key()).map(new String(_, "utf8")).getOrElse("")
      val value = Option(record.value()).getOrElse(Array.emptyByteArray)
      (key, value)
    }

    /** Tuple2[String, Array[Byte]] type information for Flink's serializer stack. */
    override def getProducedType: TypeInformation[(String, Array[Byte])] =
      createTuple2TypeInformation(createTypeInformation[String], createTypeInformation[Array[Byte]])
  }


  // Two-decimal-place formatter; currently only referenced by commented-out
  // debugging code, kept because it is a public member of this object.
  val df = new DecimalFormat("#0.00")

  /**
   * Builds a FlinkKafkaProducer sink that writes Avro-encoded [[MixStream]]
   * records to the given topic on the local broker.
   *
   * @param topic target Kafka topic name
   * @return producer using Avro specific-record serialization
   */
  def createAvroProducerForTopic(topic: String): FlinkKafkaProducer[MixStream] = {
    val producerProps = new Properties()
    // Only the broker list matters for a producer. The previous version also
    // set consumer-only options (zookeeper.connect, group.id,
    // auto.offset.reset), which producers ignore and warn about at startup.
    producerProps.setProperty("bootstrap.servers", "localhost:9092")

    val serializer = AvroSerializationSchema.forSpecific(classOf[MixStream])
    new FlinkKafkaProducer[MixStream](topic, serializer, producerProps)
  }

  /**
   * Entry point: consumes the "rtsp" and "pickle" topics, joins the two
   * keyed byte streams per key within a 1-second processing-time session
   * window, wraps each match into a [[MixStream]] Avro record, and writes
   * the result to the "out" topic.
   */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Input topics.
    val TOPIC_rtsp = "rtsp"
    val TOPIC_pickle = "pickle"

    // Builds consumer properties for the local broker with the given group id;
    // both sources share everything except the consumer group.
    def consumerProps(groupId: String): Properties = {
      val props = new Properties()
      props.setProperty("zookeeper.connect", "localhost:2181")
      props.setProperty("bootstrap.servers", "localhost:9092")
      props.setProperty("group.id", groupId)
      props.put("auto.offset.reset", "latest")
      props
    }

    // Kafka sources: each record arrives as a (key, raw-bytes) tuple.
    val ks1 = env.addSource(new FlinkKafkaConsumer(TOPIC_rtsp, new MyKafkaReader, consumerProps("kg1")))
    // Key extraction reference: https://www.cnblogs.com/Springmoon-venn/p/11076636.html
    val ks2 = env.addSource(new FlinkKafkaConsumer(TOPIC_pickle, new MyKafkaReader, consumerProps("kg2")))

    // Avro producer sink for the joined output.
    val flinkAvroKafkaProducer = createAvroProducerForTopic("out")

    // Co-group the two streams on the record key inside a 1-second
    // processing-time session window.
    val k3 = ks1.coGroup(ks2).where(_._1).equalTo(_._1)
      .window(ProcessingTimeSessionWindows.withGap(Time.seconds(1)))
      .apply(new CoGroupFunction[(String, Array[Byte]), (String, Array[Byte]), MixStream] {

        // Receives all elements sharing one key from each side of the co-group.
        // Only the first element of each side is used to build the output record.
        override def coGroup(k1: lang.Iterable[(String, Array[Byte])], k2: lang.Iterable[(String, Array[Byte])], collector: Collector[MixStream]): Unit = {
          try {
            val ms = new MixStream
            ms.setId(k1.head._1)
            ms.setStream(ByteBuffer.wrap(k1.head._2))
            ms.setDets(ByteBuffer.wrap(k2.head._2))
            collector.collect(ms)
            println("ok ...")
          } catch {
            case ex: Exception =>
              // One side was empty (head on an empty iterable) or the record
              // could not be assembled; log the cause instead of discarding it.
              println(s"Missing ... $ex")
          }
        }
      })

    k3.addSink(flinkAvroKafkaProducer)
    println("start...")
    env.execute("stream word count")
  }

}
