package com.atguigu.gmall.canal

import java.net.InetSocketAddress
import java.util

import com.alibaba.fastjson.JSONObject
import com.alibaba.otter.canal.client.{CanalConnector, CanalConnectors}
import com.alibaba.otter.canal.protocol.CanalEntry.{EntryType, EventType, RowChange}
import com.alibaba.otter.canal.protocol.{CanalEntry, Message}
import com.atguigu.realtime.gmall.common.Constant
import com.google.protobuf.ByteString

import scala.collection.JavaConverters._
import scala.util.Random

/**
 * @author Shelly An
 * @create 2020/9/5 14:35
 *         Reads binlog change data from a specific instance on the canal server
 *         and forwards order-related INSERT rows to Kafka.
 */
object CanalClient {

  /**
   * Routes a batch of row changes to the Kafka topic matching its source table.
   *
   * Only INSERT events on `order_info` and `order_detail` are forwarded; all
   * other table/event combinations are ignored (e.g. status UPDATEs on orders
   * are irrelevant when we only track sales amounts).
   *
   * @param rowDataList rows affected by one binlog entry
   * @param eventType   binlog event type (INSERT / UPDATE / DELETE / ...)
   * @param tableName   name of the table the entry came from
   */
  def handleRowData(rowDataList: util.List[CanalEntry.RowData],
                    eventType: EventType,
                    tableName: String): Unit = {
    // Single shared guard instead of repeating size/event checks per table.
    if (!rowDataList.isEmpty && eventType == EventType.INSERT) {
      tableName match {
        case "order_info"   => handleData(rowDataList, Constant.ORDER_INFO_TOPIC)
        case "order_detail" => handleData(rowDataList, Constant.ORDER_DETAIL_TOPIC)
        case _              => // table not of interest: ignore
      }
    }
  }

  // Shared RNG: avoids allocating a new Random per row and the risk of
  // identically-seeded instances when rows arrive within the same millisecond.
  private val random = new Random()

  /**
   * Serializes each row's after-image columns into a JSON object and sends it
   * to the given Kafka topic.
   *
   * NOTE(review): one short-lived thread is spawned per row purely to simulate
   * a random network delay (0-9s) before producing. Do not keep this pattern
   * in production — use a scheduled executor or drop the delay entirely.
   */
  private def handleData(rowDataList: util.List[CanalEntry.RowData], topic: String): Unit = {
    for (rowData <- rowDataList.asScala) {
      val obj = new JSONObject
      // After-image columns carry the row state following the INSERT.
      for (column <- rowData.getAfterColumnsList.asScala) {
        obj.put(column.getName, column.getValue)
      }
      println(obj.toString)
      // Simulate random delay, then write to Kafka.
      new Thread() {
        override def run(): Unit = {
          Thread.sleep(random.nextInt(9 * 1000))
          MyKafkaUtil.send(topic, obj.toJSONString)
        }
      }.start()
    }
  }

  /**
   * Entry point: connects to a canal server instance, subscribes to the
   * gmall0421 database, and polls for binlog changes forever.
   */
  def main(args: Array[String]): Unit = {
    // 1. Connect to the canal instance (use newClusterConnector for a cluster).
    val addr = new InetSocketAddress("hadoop102", 11111)
    val conn: CanalConnector = CanalConnectors.newSingleConnector(addr, "example", "", "")
    conn.connect()
    // Restrict the subscription: the instance may watch more tables than we need.
    conn.subscribe("gmall0421.*")

    // 2. Poll for changes. batchSize = 100: larger batches are more efficient
    //    but put more pressure on downstream Kafka.
    while (true) {
      val msg: Message = conn.get(100) // get() auto-acks the fetched batch
      val entries: util.List[CanalEntry.Entry] = msg.getEntries
      // Defensive: documented as non-null, but guard against NPE anyway.
      if (entries != null && !entries.isEmpty) {
        for (entry <- entries.asScala) {
          // Only ROWDATA entries carry actual row changes
          // (skip TRANSACTIONBEGIN / TRANSACTIONEND markers).
          if (entry != null && entry.hasEntryType && entry.getEntryType == EntryType.ROWDATA) {
            // 3. Parse the protobuf store value into concrete row changes.
            val storeValue: ByteString = entry.getStoreValue
            val rowChange: RowChange = RowChange.parseFrom(storeValue)
            handleRowData(rowChange.getRowDatasList, rowChange.getEventType, entry.getHeader.getTableName)
          }
        }
      } else {
        // println (was print): keep repeated idle messages on separate lines.
        println("数据没有变化，3s后继续...")
        Thread.sleep(3000)
      }
    }
  }
}
