package com.xj.center.data.service.kafka;

import com.alibaba.fastjson.JSONObject;
import com.xj.center.data.service.DataCustomService;
import com.xj.center.data.service.PointValueService;
import com.xj.center.data.service.job.PointValueScheduleJob;
import com.xj.common.bean.constant.Common;
import com.xj.common.bean.point.PointValue;
import com.xj.common.utils.JsonPaseUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import javax.websocket.Session;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadPoolExecutor;

/**
 * Kafka consumer for point-value messages: deserializes each record into a
 * {@code PointValue}, then either saves it immediately via a thread pool or
 * buffers it for batch processing depending on the current ingest speed.
 */
@Slf4j
@Component
public class PointValueConsumer {

    /** Ingest-speed threshold (values/interval) above which records are batched. */
    @Value("${data.point.batch.speed}")
    private Integer batchSpeed;
    @Resource
    private PointValueService pointValueService;
    @Resource
    private DataCustomService dataCustomService;
    @Resource
    private ThreadPoolExecutor threadPoolExecutor;

    /**
     * Consumes one point-value record from {@code TOPIC_EXCHANGE_VALUE}.
     * <p>
     * Flow: parse JSON → validate → pre-handle → persist (async single save when
     * ingest speed is below {@link #batchSpeed}, otherwise append to the batch
     * buffer drained by {@code PointValueScheduleJob}) → after-handle → commit
     * the offset manually.
     *
     * @param record raw Kafka record whose value is a JSON-encoded {@code PointValue}
     * @param ack    manual-ack handle; the offset is committed in every path so the
     *               record is never redelivered
     */
    @KafkaListener(topics = {Common.KAFKA.TOPIC_EXCHANGE_VALUE}, groupId = "${pointValueGroup}")
    public void onMessage(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        // Parameterized SLF4J logging instead of System.out.println, so the
        // output honors the configured log level and format.
        log.debug("PointValueConsumer consumed: {}-{}-{}",
                record.topic(), record.partition(), record.value());

        String jsonStr = record.value().toString();
        PointValue pointValue = JsonPaseUtil.JSONStrToObj(jsonStr, PointValue.class);

        if (null == pointValue || null == pointValue.getDeviceId()) {
            log.error("Invalid point value: {}", pointValue);
            // Acknowledge the malformed record; without this the offset is never
            // committed and the bad message would be redelivered indefinitely.
            ack.acknowledge();
            return;
        }
        PointValueScheduleJob.valueCount.getAndIncrement();
        // Fixed: original statement had two '{}' placeholders but only one argument.
        log.debug("Point value received: {}", pointValue);

        // pre handle
        dataCustomService.preHandle(pointValue);

        // Decide between immediate async persistence and batch buffering based
        // on the measured transmission speed.
        if (PointValueScheduleJob.valueSpeed.get() < batchSpeed) {
            // Low throughput: save point value to Redis & MongoDB asynchronously.
            threadPoolExecutor.execute(() -> pointValueService.savePointValue(pointValue));
        } else {
            // High throughput: queue for the scheduled batch job. try/finally
            // guarantees the write lock is released even if add() throws.
            PointValueScheduleJob.valueLock.writeLock().lock();
            try {
                PointValueScheduleJob.pointValues.add(pointValue);
            } finally {
                PointValueScheduleJob.valueLock.writeLock().unlock();
            }
        }

        // after handle
        dataCustomService.afterHandle(pointValue);

        // Commit the offset (manual-ack mode).
        ack.acknowledge();
    }

}
