package com.ruyuan.event.streaming.process;
import com.ruyuan.event.streaming.pojo.EventClientLog;
import com.ruyuan.event.streaming.pojo.EventJoinLog;
import com.ruyuan.event.streaming.pojo.EventServerLog;
import com.ruyuan.event.streaming.utils.Constants;
import com.ruyuan.event.streaming.utils.HBaseUtils;
import com.ruyuan.event.streaming.utils.KafkaProducerUtils;
import com.ruyuan.event.streaming.utils.RedisUtils;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.kafka.clients.producer.Producer;
import redis.clients.jedis.Jedis;

/**
 * Joins client-side event logs with their server-side counterparts.
 *
 * <p>For each incoming {@link EventClientLog}, looks up the matching
 * {@link EventServerLog} context (Redis cache first, then HBase — see
 * {@code ProcessETLUtils.getServerContext}). On a hit, the joined
 * {@link EventJoinLog} is emitted downstream; on a miss, the client-only
 * record is sent to a retry Kafka topic so the join can be completed later.
 *
 * <p>Not thread-safe: Flink invokes {@code flatMap} from a single task thread,
 * and the HBase/Redis/Kafka clients held here are per-task instances.
 */
public class EventClientLogRichFlatMap extends RichFlatMapFunction<EventClientLog, EventJoinLog> {

    /** HBase table holding server-side event logs. */
    private final String tableName = Constants.HBASE_EVENT_SERVER_LOG_TABLE;

    private HTable hTable;
    private Jedis jedis;
    private Producer producer;   // raw type kept: KafkaProducerUtils.getProducer() declares no type parameters

    /**
     * Initializes the per-task HBase, Redis, and Kafka clients.
     *
     * @param parameters Flink task configuration
     * @throws Exception if any client fails to initialize
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        hTable = HBaseUtils.initHbaseClient(tableName);
        jedis = RedisUtils.getJedis();
        producer = KafkaProducerUtils.getProducer();
        super.open(parameters);
    }

    /**
     * Releases the clients acquired in {@link #open(Configuration)}.
     * Nested try/finally guarantees each close attempt runs even if an
     * earlier one throws; without this override the connections leak on
     * task shutdown or restart.
     *
     * @throws Exception if releasing a resource fails
     */
    @Override
    public void close() throws Exception {
        try {
            if (hTable != null) {
                hTable.close();
            }
        } finally {
            try {
                if (jedis != null) {
                    jedis.close();
                }
            } finally {
                try {
                    if (producer != null) {
                        producer.close();
                    }
                } finally {
                    super.close();
                }
            }
        }
    }

    /**
     * Joins one client log with its server context and emits or retries.
     *
     * @param eventClientLog the client-side event record
     * @param out collector for successfully joined records
     */
    @Override
    public void flatMap(EventClientLog eventClientLog, Collector<EventJoinLog> out) {
        // Lookup key: requestId + eventId + eventUserId.
        byte[] key = ProcessETLUtils.generateBytesKey(eventClientLog);

        // Redis cache first, then HBase fallback; null when neither has the server log.
        EventServerLog eventServerContext = ProcessETLUtils.getServerContext(jedis, hTable, key);

        /*
         * Build the EventJoin object by stitching client + server data.
         * If neither the cache nor HBase produced a server log, the result
         * carries client-side fields only and must go through the retry
         * service to be completed later.
         */
        EventJoinLog eventJoinLog = ProcessETLUtils.buildEventServerLog(eventClientLog, eventServerContext);
        if (eventServerContext == null) {
            // Server side missing: publish to the retry Kafka topic instead of emitting.
            ProcessETLUtils.retry(producer, eventJoinLog.toByteArray());
        } else {
            out.collect(eventJoinLog);
        }
    }
}
