package com.fwmagic.spark.streaming

import com.alibaba.fastjson.{JSON, JSONException}
import com.fwmagic.spark.streaming.bean.Order
import com.fwmagic.spark.streaming.util.{HbaseUtil, OffsetRangesUtils}
import java.util.ArrayList
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, Put, Table}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext, TaskContext}

object ExactlyOnceResultAndOffsetSinkToHbase {

    /**
     * Spark Streaming job that consumes orders from Kafka and writes both the
     * result rows AND the Kafka offsets into HBase, achieving exactly-once
     * semantics: the offset columns are attached to the Put of the *last*
     * record of each partition's batch, so result data and offset land in the
     * same HBase row (a single-row mutation is atomic in HBase). Because row
     * keys are order ids, replayed records simply overwrite identical rows.
     *
     * Expected args: isLocal appName groupId allTopics
     * e.g.: true KafkaToHbase g110 myorder
     */
    def main(args: Array[String]): Unit = {
        // Parse command-line arguments.
        val Array(isLocal, appName, groupId, allTopics) = args

        // FIX: honor the appName argument (it was previously parsed but ignored
        // in favor of the class name).
        val conf = new SparkConf().setAppName(appName)

        if (isLocal.toBoolean) {
            conf.setMaster("local[*]")
        }

        val sc = new SparkContext(conf)

        val ssc = new StreamingContext(sc, Seconds(5))

        val topics = allTopics.split(",")

        val bootstrapServers = "192.168.62.131:9092,192.168.62.132:9092,192.168.62.133:9092"
//        val bootstrapServers = "localhost:9092"

        // Kafka consumer configuration.
        val kafkaParams = Map[String, String](
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
            ConsumerConfig.GROUP_ID_CONFIG -> groupId,
            ConsumerConfig.MAX_POLL_RECORDS_CONFIG -> "100",
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
            // Default is true: offsets would be auto-committed every 5s to the
            // special Kafka topic __consumer_offsets. We manage offsets
            // ourselves (stored in HBase) to achieve exactly-once, so auto
            // commit must be off.
            ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "false",
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
        )


        // Load historical offsets (persisted in HBase, queried via Phoenix) so
        // that after a restart we resume from where the previous run stopped.
        val offsets: collection.Map[TopicPartition, Long] = OffsetRangesUtils.queryHistoryOffsetFromHbaseByPhoenix("myorder", groupId)

        val kafkaStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc
            , LocationStrategies.PreferConsistent
            , ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsets))

        // Iterate over each KafkaRDD, extracting the offset ranges and the data.
        kafkaStream.foreachRDD(rdd => {
            if (!rdd.isEmpty()) {
                // Offset ranges for this micro-batch (one entry per Kafka partition).
                val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
                // Raw JSON payloads.
                val lines: RDD[String] = rdd.map(_.value())

                // Parse each JSON line into an Order; malformed lines yield null
                // and are filtered out below (best-effort, logged via stack trace).
                val orderRDD: RDD[Order] = lines.map(line => {
                    var order: Order = null
                    try {
                        order = JSON.parseObject(line, classOf[Order])
                    } catch {
                        case e: JSONException => {
                            e.printStackTrace()
                            //TODO
                        }
                    }
                    order
                })
                // Drop records that failed to parse.
                val filtered: RDD[Order] = orderRDD.filter(_ != null)
                filtered.foreachPartition(iter => {
                    if (iter.nonEmpty) {
                        // The Task partition id maps one-to-one onto the Kafka
                        // topic-partition index for a KafkaRDD, so it indexes
                        // directly into offsetRanges.
                        val offsetRange = offsetRanges(TaskContext.getPartitionId())

                        // One HBase connection per partition.
                        val connection: Connection = HbaseUtil.getConnection("192.168.62.131:2181,192.168.62.132:2181,192.168.62.133:2181")
                        try {
                            val myOrderTable: Table = connection.getTable(TableName.valueOf("myorder"))
                            try {
                                // Buffer Puts and flush them in small batches.
                                val puts = new ArrayList[Put]()

                                // Iterate over every record in this partition.
                                iter.foreach(order => {
                                    // One Put per order = one HBase row keyed by order id.
                                    val put: Put = new Put(Bytes.toBytes(order.oid))
                                    // Result column.
                                    put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("total_money"), Bytes.toBytes(order.totalMoney))

                                    // For the LAST record of the batch, attach the offset
                                    // columns to the same row: writing result + offset in
                                    // one row is atomic in HBase, which is what makes this
                                    // exactly-once.
                                    if (!iter.hasNext) {
                                        val topic: String = offsetRange.topic
                                        val partition: Int = offsetRange.partition
                                        val offset: Long = offsetRange.untilOffset
                                        put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("groupid"), Bytes.toBytes(groupId))
                                        put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("topic_partition"), Bytes.toBytes(topic + "_" + partition))
                                        put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("offset"), Bytes.toBytes(offset))
                                    }
                                    puts.add(put)
                                    // Flush every 10 buffered Puts.
                                    if (puts.size() % 10 == 0) {
                                        myOrderTable.put(puts)
                                        puts.clear()
                                    }
                                })

                                // Flush any remainder (skip the no-op call when the
                                // last flush left the buffer empty).
                                if (!puts.isEmpty) {
                                    myOrderTable.put(puts)
                                }
                            } finally {
                                // FIX: close the table even if a put fails, so a
                                // failed task attempt does not leak resources.
                                myOrderTable.close()
                            }
                        } finally {
                            // FIX: always release the HBase connection.
                            connection.close()
                        }
                    }
                })
            }
        })

        // Start the streaming job and block until termination.
        ssc.start()

        ssc.awaitTermination()
    }

}
