package com.rundow.data.listener;


import com.rundow.common.base.bean.point.PointValue;
import com.rundow.common.base.model.event.PointValueEvent;
import com.rundow.data.service.PointValueService;
import com.rundow.data.service.RepositoryHandleService;
import com.rundow.data.service.job.PointValueScheduleJob;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.event.EventListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.concurrent.ThreadPoolExecutor;

/**
 * Listens for point values pushed by drivers.
 * <p>
 * Sizing note: 2,000,000 SinglePointValue records produce roughly 60 MB of
 * index data and 400 MB of payload data.
 */
@Slf4j
@Component
public class PointValueListener {

    /** Transmission-speed threshold above which values are buffered for batch writes. */
    @Value("${data.point.batch.speed}")
    private Integer batchSpeed;

    @Resource
    private PointValueService pointValueService;

    @Resource
    private RepositoryHandleService repositoryHandleService;

    @Resource
    private ThreadPoolExecutor threadPoolExecutor;

    /**
     * Handles a {@link PointValueEvent}: validates the payload, then either saves
     * the value immediately on the worker pool (low traffic) or appends it to the
     * shared buffer drained by {@link PointValueScheduleJob} (high traffic).
     *
     * @param pointValueEvent event carrying the received point value; events whose
     *                        value or device id is null are logged and dropped
     */
    @Async
    @EventListener
    public void pointValueReceive(PointValueEvent pointValueEvent) {
        final PointValue pointValue = pointValueEvent.getPointValue();
        if (null == pointValue || null == pointValue.getDeviceId()) {
            log.error("Invalid point value: {}", pointValue);
            return;
        }
        PointValueScheduleJob.valueCount.getAndIncrement();
        log.debug("Point value, From: {}, Received: {}", pointValue.getDeviceId(), pointValue);

        // Judge whether to process data in batch according to the data transmission speed
        if (PointValueScheduleJob.valueSpeed.get() < batchSpeed) {
            // Low traffic: persist each value directly (Redis & MongoDB)
            threadPoolExecutor.execute(() -> pointValueService.savePointValue(pointValue));
        } else {
            // High traffic: buffer the value for the scheduled batch writer.
            // unlock() moved into finally — previously an exception from add()
            // would leave the write lock held forever and deadlock the batch job.
            PointValueScheduleJob.valueLock.writeLock().lock();
            try {
                PointValueScheduleJob.pointValues.add(pointValue);
            } finally {
                PointValueScheduleJob.valueLock.writeLock().unlock();
            }
        }
    }
}
