package com.sia.springcloud.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.sia.springcloud.kafka.DecodeingKafka;
import com.sia.springcloud.mapper.DeviceMapper;
import com.sia.springcloud.mapper.PointMapper;
import com.sia.springcloud.model.Device;
import com.sia.springcloud.model.Point;
import com.sia.springcloud.result.DataCollection;
import com.sia.springcloud.result.GridDataResult;
import com.sia.springcloud.result.RealTimeData;
import com.sia.springcloud.result.ResultUtil;
import com.sia.springcloud.service.IDataShowService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.poi.ss.formula.functions.T;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.jms.annotation.JmsListener;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import javax.jms.JMSException;
import javax.jms.ObjectMessage;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

/**
 * @Author: Sirius
 * @Date: 2022/3/2 14:24
 * @Description: Real-time data display service. Consumes collected data points
 * from Kafka into an in-memory cache and serves paged, JSON-encoded snapshots
 * of the latest value per point.
 **/
@Slf4j
@Service
public class DataShowService implements IDataShowService, ApplicationRunner {


    @Resource
    private PointMapper pointMapper;

    /** Kafka topic carrying the collected point data. */
    @Value("${topic.name}")
    private String topicName;

    /**
     * Kafka broker address. Configurable via {@code kafka.bootstrap-servers};
     * defaults to the previously hard-coded local broker.
     */
    @Value("${kafka.bootstrap-servers:127.0.0.1:9092}")
    private String bootstrapServers;

    /**
     * Latest successful collection keyed by point id.
     * The map reference is never reassigned, so {@code final} (not {@code volatile})
     * is correct; ConcurrentHashMap provides the thread-safe access.
     */
    private static final ConcurrentHashMap<Integer, DataCollection> realTimeCache = new ConcurrentHashMap<>();


    /**
     * Stores/overwrites the latest collection result for a point.
     *
     * @param dataCollection incoming sample; ignored when null or unsuccessful
     */
    public void updateData(DataCollection dataCollection) {
        if (dataCollection != null && dataCollection.isSuccess()) {
            // put() inserts or overwrites atomically; the former
            // containsKey + replace/put sequence was a non-atomic check-then-act.
            realTimeCache.put(dataCollection.getPointId(), dataCollection);
        }
    }


    /**
     * Returns a JSON page of real-time data for every point of a device.
     * Values come from the Kafka-fed cache; points without a cached sample are
     * returned with empty type/value and the current timestamp.
     *
     * @param start    1-based page number
     * @param limit    page size
     * @param deviceId device whose points are listed
     * @return JSON-serialized {@code GridDataResult<RealTimeData>}
     */
    public String showRealTimeData(Integer start, Integer limit, String deviceId) {
        log.info("查询点位，deviceId:{}，limit:{}，start:{}", deviceId, limit, start);

        QueryWrapper<Point> pointQueryWrapper = new QueryWrapper<>();
        pointQueryWrapper.eq("device_id", deviceId);
        Integer total = pointMapper.selectCount(pointQueryWrapper);

        Page<Point> pointPage = new Page<>();
        pointPage.setTotal(total);
        pointPage.setCurrent(start);
        pointPage.setSize(limit);

        IPage<Point> pointIPage = pointMapper.selectPage(pointPage, pointQueryWrapper);
        List<Point> pointList = pointIPage.getRecords();

        // Former System.out.println replaced by the class logger.
        log.debug("分页查询的数据的数量：{}", pointList.size());

        List<RealTimeData> realTimeDataList = new ArrayList<>(pointList.size());
        for (Point point : pointList) {
            realTimeDataList.add(toRealTimeData(point));
        }

        log.info("查询到的设备列表数量为 {} 条，本次返回的设备数量为 {}", total, realTimeDataList.size());

        // Wrap into the grid envelope expected by the front end.
        GridDataResult<RealTimeData> realTimeDataListResult = new GridDataResult<>();
        realTimeDataListResult.setTotal(total);
        realTimeDataListResult.setRoot(realTimeDataList);
        return JSON.toJSONString(realTimeDataListResult);
    }

    /**
     * Builds the display DTO for one point, merging in the cached sample when present.
     */
    private RealTimeData toRealTimeData(Point point) {
        RealTimeData realTimeData = new RealTimeData();
        realTimeData.setPointId(point.getId());
        realTimeData.setPointName(point.getPointName());

        DataCollection cached = realTimeCache.get(point.getId());
        if (cached != null) {
            realTimeData.setType(StringUtils.isNotEmpty(cached.getType()) ? cached.getType() : "");
            realTimeData.setValue(StringUtils.isNotEmpty(cached.getValue()) ? cached.getValue() : "");
            // Guard against a null collection date instead of risking an NPE;
            // fall back to "now", matching the no-sample branch below.
            Date date = cached.getDate();
            realTimeData.setDate(String.valueOf(date != null ? date.getTime() : System.currentTimeMillis()));
        } else {
            realTimeData.setType("");
            realTimeData.setValue("");
            // No cached sample yet — use "now" so the client still gets a parseable timestamp.
            realTimeData.setDate(String.valueOf(System.currentTimeMillis()));
        }
        return realTimeData;
    }

    /**
     * Runs once Spring Boot has started: subscribes a Kafka consumer to the
     * configured topic and polls forever, feeding every record into the cache.
     *
     * NOTE(review): this loop never returns, so it occupies the thread that
     * executes ApplicationRunner callbacks; consider moving it to a dedicated
     * consumer thread if later runners must execute.
     */
    @Override
    public void run(ApplicationArguments args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        // Consumers with a different group id can re-consume the same messages.
        props.put("group.id", "etl");
        // Offsets are committed manually after a batch is fully processed.
        props.put("enable.auto.commit", "false");
        props.put("session.timeout.ms", "30000");
        // Maximum number of records returned by a single poll().
        props.put("max.poll.records", 5000);
        // earliest: resume from the committed offset, or from the beginning when none exists.
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", DecodeingKafka.class.getName());

        KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList(this.topicName));

        log.info("Kafka初始化!");
        log.info("---------开始消费---------");

        try {
            while (true) {
                try {
                    // poll(Duration) replaces the deprecated poll(long) overload.
                    ConsumerRecords<String, Object> msgList = consumer.poll(Duration.ofMillis(100));

                    // poll() never returns null; check for an empty batch instead so
                    // commitSync() is only issued after actual records were processed.
                    if (!msgList.isEmpty()) {
                        for (ConsumerRecord<String, Object> record : msgList) {
                            updateData((DataCollection) record.value());

                            if (record.offset() % 5000 == 0) {
                                log.info("这是接收的第{}条消息", record.offset());
                            }
                        }
                        // Manual commit only after the whole batch reached the cache.
                        consumer.commitSync();
                    } else {
                        Thread.sleep(1000);
                    }
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop consuming cleanly.
                    Thread.currentThread().interrupt();
                    log.warn("Kafka consume loop interrupted, exiting", e);
                    return;
                } catch (Exception e) {
                    // Keep the loop alive but log with the full stack trace
                    // (replaces printStackTrace + message-only log.info).
                    log.error("Kafka消费异常", e);
                }
            }
        } finally {
            // Release sockets/buffers when the loop exits for any reason.
            consumer.close();
        }
    }


}
