package com.test.service;

import com.test.Utils.StockTradeMapper;
import com.test.entity.StockTrade;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import redis.clients.jedis.Jedis;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.time.Duration;
import java.util.Properties;

@Slf4j
public class TradeCal {
    /** Shared Redis connection settings used by every sink this job creates. */
    public static FlinkJedisPoolConfig redisConfig = new FlinkJedisPoolConfig.Builder()
            .setHost("192.168.43.150")
            .setPort(6379)
            .build();

    /**
     * Wires the per-second trade-throughput pipeline onto the given environment:
     * Kafka topic -> {@link StockTrade} -> 1-second tumbling count -> Redis list.
     *
     * @param env the Flink execution environment to attach the source and sink to
     */
    public static void createDataStream(StreamExecutionEnvironment env) {
        // Use the modern KafkaSource instead of the deprecated FlinkKafkaConsumer;
        // KafkaSource and OffsetsInitializer were already imported but unused,
        // suggesting this migration was intended.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("192.168.88.135:19092,192.168.88.135:29092,192.168.88.135:39092")
                .setGroupId("trade-cal")
                // NOTE(review): topic name ends with '-' — confirm this is really the topic.
                .setTopics("stock-trades-")
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // The original assigned wall-clock timestamps (System.currentTimeMillis())
        // and then used event-time windows — contradictory: the watermark machinery
        // only added up to 5s of latency without real event-time semantics. The
        // intent ("records per wall-clock second") is processing time, so no
        // watermarks are needed.
        DataStream<StockTrade> tradeStream = env
                .fromSource(source, WatermarkStrategy.noWatermarks(), "stock-trades-source")
                .map(new StockTradeMapper());

        // Count the number of records processed each second and push to Redis.
        tradeStream
                .map(new MapFunction<StockTrade, Long>() {
                    @Override
                    public Long map(StockTrade value) throws Exception {
                        return 1L; // each record contributes one unit to the count
                    }
                })
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(1)))
                .aggregate(new ProcessingSpeedAggregator())
                .addSink(new ProcessingSpeedRedisSink(redisConfig));

        log.info("Trade calculation analysis configured");
    }

    /**
     * Sink that appends each window's record count to a Redis list.
     * Thread-confined: one Jedis connection per task, opened in {@link #open}.
     */
    public static class ProcessingSpeedRedisSink extends RichSinkFunction<Long> {
        // Created per task in open(); transient so Flink never tries to serialize it.
        private transient Jedis jedis;
        private final FlinkJedisPoolConfig config;

        public ProcessingSpeedRedisSink(FlinkJedisPoolConfig config) {
            this.config = config;
        }

        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            jedis = new Jedis(config.getHost(), config.getPort());
        }

        @Override
        public void close() throws Exception {
            if (jedis != null) {
                jedis.close();
            }
            super.close();
        }

        @Override
        public void invoke(Long value, Context context) throws Exception {
            // RPUSH appends to a list under one constant key, so history is kept
            // without per-write unique keys. (The original computed an unused
            // timestamp and its comment wrongly claimed the key was unique.)
            log.debug("Appending processing speed {} to Redis", value);
            jedis.rpush("processing_speed_", String.valueOf(value));
        }
    }

    /**
     * Sums the per-record {@code 1L} values emitted upstream into a per-window
     * record count. Accumulator and result are both plain {@code Long}.
     */
    public static class ProcessingSpeedAggregator implements AggregateFunction<Long, Long, Long> {

        @Override
        public Long createAccumulator() {
            return 0L; // empty window => count of zero
        }

        @Override
        public Long add(Long value, Long accumulator) {
            return accumulator + value; // value is always 1L, so this counts records
        }

        @Override
        public Long getResult(Long accumulator) {
            return accumulator;
        }

        @Override
        public Long merge(Long a, Long b) {
            return a + b; // combine partial counts from merged window panes
        }
    }
}
