package com.tech.customer

import com.tech.common.{KafkaUtils, KuduUtil}
import com.tech.process.EventProcess
import org.apache.spark.sql.ForeachWriter

class MyForeachWriter(manager: LoadResourceManager, broker: String, kuduMaster: String) extends ForeachWriter[(String, String, Int, Long)] with Serializable {

  import scala.util.control.NonFatal

  // Hoisted out of process(): compiling a Regex per record is wasted work.
  // Regex.unapplySeq requires a full-string match, so these classify topics
  // by prefix ("retailevent...", "customer...").
  private val retaileventPattern = "(retailevent.*)".r
  private val customerPattern = "(customer.*)".r

  /**
   * Called once per partition/epoch before any rows are processed.
   * Initializes the Kafka producer and points KuduUtil at the Kudu master.
   *
   * @param partitionId id of the partition this writer instance handles
   * @param version     epoch/batch id supplied by Structured Streaming
   * @return true so that process() is invoked for this partition's rows
   */
  override def open(partitionId: Long, version: Long): Boolean = {
    println(s"开始--------------$partitionId-------------------$version")
    KafkaUtils.init(broker)
    KuduUtil.kuduMaster = kuduMaster
    true
  }

  /**
   * Handles one record of (message, topic, partition, offset).
   *
   * Dispatches on the topic name: "retailevent*" and "customer*" topics are
   * handed to EventProcess; on failure the raw message is logged and sent to
   * the "errorMsg" Kafka topic. Regardless of outcome, the message is
   * re-sent unchanged to "&lt;topic&gt;_resend" and the offset is upserted
   * into Kudu so consumption progress survives restarts.
   */
  override def process(value: (String, String, Int, Long)): Unit = {
    val (message, topic, partition, offset) = value

    topic match {
      case retaileventPattern(_) =>
        println(message)
        try {
          EventProcess.retaileventProcess(message, manager.retaileventSchemaMapBroadcast.value, manager.ruleMapBroadcast.value, manager.customerSchemaMapBroadcast.value)
        } catch {
          // NonFatal instead of Exception: lets InterruptedException and
          // fatal VM errors propagate instead of being swallowed here.
          case NonFatal(e) =>
            println(e.getMessage)
            println("retailevent处理失败：" + message)
            KafkaUtils.write2Kafka("errorMsg", message)
        }
      case customerPattern(_) =>
        try {
          EventProcess.customerProcess(message, manager.customerSchemaMapBroadcast.value)
        } catch {
          case NonFatal(e) =>
            println(e.getMessage)
            println("customer处理失败：" + message)
            KafkaUtils.write2Kafka("errorMsg", message)
        }
      case _ => println("其他消息")
    }

    // Re-send the original message unchanged to the "<topic>_resend" topic.
    KafkaUtils.write2Kafka(topic + "_resend", message)
    // Persist the consumed offset to Kudu.
    KuduUtil.upsertOffset(topic, partition, offset)
  }

  /** Called when the partition's epoch ends (normally or with an error). */
  override def close(errorOrNull: Throwable): Unit = {
    println("结束---------------------------------")
  }
}