package com.zyh.study.ForestPlus;


import com.dtflys.forest.http.ForestSSE;
import com.dtflys.forest.interceptor.SSEInterceptor;
import com.dtflys.forest.sse.EventSource;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.f4b6a3.ulid.Ulid;
import jakarta.annotation.PostConstruct;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Consumes the Alpaca "non-trading activities" SSE stream with Redis-backed
 * idempotency: every event id is recorded before processing, a "last processed"
 * cursor is maintained, and an hourly job replays the range from the oldest
 * failed event up to the newest processed one.
 *
 * @Classname AlpacaNonSseDeal
 * @Date 2025-09-08 22:03
 * @Created by 86177
 */
@Component
@EnableScheduling
@Slf4j
public class AlpacaNonSseDeal implements SSEInterceptor, ApplicationContextAware {
    private final ObjectMapper objectMapper = new ObjectMapper();

    private final String startEventId = "00000000000000000000000000";

    // 当前的SSE连接
    protected static volatile ForestSSE currentSSE;
    // 存储的已经处理过的最新的event_id
    private static final String LAST_PROCESSED_ID_KEY = "alpaca:NTA:last_processed_id";
    // 当前已经处理过的事件，确保幂等性设置
    private static final String EVENT_KEY_PREFIX = "alpaca:NTA:event:";
    // 消费失败的事件
    private static final String FAIL_EVENT_KEY = "alpaca:NTA:failEvent:";

    private ApplicationContext applicationContext;

    private AlpacaNonSseDeal alpacaNonSseDeal;

    @Autowired
    private RedissonCacheService cacheService;

    @Autowired
    private AlpacaMessageListener alpacaMessageListener;

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    @PostConstruct
    public void init() {
        this.alpacaNonSseDeal = applicationContext.getBean(AlpacaNonSseDeal.class);
        createSSE();
    }

    /**
     * 创建sse消息连接
     */
    public void createSSE() {
        if (currentSSE != null) {
            return;
        }
        String lastProcessedId = initializeLastProcessedId();
        try {
            // 连接订阅订单更新sse
            alpacaMessageListener.subscribeToNonTradingActivities(
                    null,
                    null,
                    null,
                    null,
                    null,
                    lastProcessedId,
                    null,
                    null,
                    null,
                    alpacaNonSseDeal
                    );
        } catch (Exception e) {
            log.error("AlpacaSseDeal.createSSE()，与alpaca创建SSE消息连接异常，error: {}", e.getMessage(), e);
        }
    }

    /**
     * 初始化最后处理的事件ID
     *
     * @return 最后处理的事件ID
     */
    private String initializeLastProcessedId() {
        if (!cacheService.exists(LAST_PROCESSED_ID_KEY)) {
            // 初始化为最小ULID: 00000000000000000000000000
            cacheService.set(LAST_PROCESSED_ID_KEY, startEventId);
        }
        return cacheService.get(LAST_PROCESSED_ID_KEY, String.class);
    }

    /**
     * 处理SSE消息
     */
    private void handleSSEMessage(TransactionRecordDTO event) throws JsonProcessingException {
        if (event == null || event.getEventUlid() == null) {
            return;
        }

        try {
            Ulid currentUlid = Ulid.from(event.getEventUlid());
            Ulid lastUlid = getLastProcessedUlid();
            int comparison = currentUlid.compareTo(lastUlid);

            // 这里应该先检查，是否已处理过该事件，如果没有处理过就加入redis中
            // 幂等检查，前缀 + eventId为唯一的Key
            String eventKey = EVENT_KEY_PREFIX + event.getEventUlid();

            // 使用 trySet 确保幂等性
            if (!cacheService.trySet(eventKey, objectMapper.writeValueAsString(event), 7, TimeUnit.DAYS)) {
                log.debug("当前事件已经处理，跳过当前事件 {}", event.getEventUlid());
                return;
            }

            // TODO 业务处理


            if (comparison > 0) {
                try {
                    // 更新最后处理ID
                    updateLastProcessedId(event.getEventUlid());
                } catch (Exception e) {
                    log.warn("更新最后处理ID失败: {}", event.getEventUlid(), e);
                }
            }
        } catch (Exception e) {
            log.error("当前事件处理失败，事件ID: {}", event.getEventUlid(), e);
            // TODO 处理失败之后，记录处理失败的event_id
            // 拿到当前失败的
            String failEventId = cacheService.get(FAIL_EVENT_KEY, String.class);
            if (failEventId == null){
                cacheService.trySet(FAIL_EVENT_KEY,objectMapper.writeValueAsString(event.getEventUlid()), 7, TimeUnit.DAYS);
            }else {
                // TODO 比较这两个eventId的值，取最小的
                if (Ulid.from(failEventId).compareTo(Ulid.from(event.getEventUlid())) > 0) {
                    cacheService.trySet(FAIL_EVENT_KEY,objectMapper.writeValueAsString(event.getEventUlid()), 7, TimeUnit.DAYS);
                }
            }
        }
    }

    /**
     * 获取最后处理的ULID
     */
    private Ulid getLastProcessedUlid() {
        String lastId = cacheService.get(LAST_PROCESSED_ID_KEY, String.class);
        return Ulid.from(lastId);
    }

    /**
     * 更新最后处理的ID
     */
    public void updateLastProcessedId(String eventId) {
        cacheService.set(LAST_PROCESSED_ID_KEY, eventId);
    }

    /**
     * 定时任务：每小时执行补偿
     * 这里的补偿消费失败了，需要重新处理
     * 如果宕机了，那么是以redis中的缓存为准，发送sse
     */
    @Scheduled(fixedRate = 60 * 60 * 1000) // 每小时
    public void scheduledCompensation() {
        // 当前的补偿机制应该为 推送：最早失败的事件 -> 当前存储的最新的事件
        log.info("开始执行事件补偿任务");
        try {
            // 查找当前缓存中失败的事件
            String failEventId = cacheService.get(FAIL_EVENT_KEY, String.class);
            // 存在失败事件，那么就推送：失败事件~最新事件
            if (failEventId != null) {
                // 获取最新事件
                String latestEventId = cacheService.get(LAST_PROCESSED_ID_KEY, String.class);
                // 推送：失败事件~最新事件
                alpacaMessageListener.subscribeToNonTradingActivities(
                        null,
                        null,
                        null,
                        null,
                        null,
                        failEventId,
                        latestEventId,
                        null,
                        null,
                        alpacaNonSseDeal
                );
            }
        } catch (Exception e) {
            log.error("执行事件补偿任务时发生错误", e);
        }
        log.info("事件补偿任务执行完成");
    }

    // SSE事件处理
    @Override
    public void onSSEOpen(EventSource eventSource) {
        try {
            //将传入的 eventSource 对象中持有的 ForestSSE 实例赋值给静态变量 currentSSE，用于保存当前已建立的 SSE 连接实例，以便后续复用或管理
            currentSSE = eventSource.sse();
            log.info("SSE connection opened");
        } catch (Exception e) {
            log.warn("更新连接状态为CONNECTED失败", e);
        }
    }

    @Override
    public void onSSEClose(EventSource eventSource) {
        currentSSE = null;
        log.warn("SSE connection closed");
    }

    @Override
    public void onMessage(EventSource event, String name, String value) {
        try {
            TransactionRecordDTO transactionRecordDTO = objectMapper.readValue(value, TransactionRecordDTO.class);
            log.debug("Received SSE event: {}", transactionRecordDTO.getEventUlid());
            handleSSEMessage(transactionRecordDTO);
        } catch (JsonProcessingException e) {
            log.error("SSE message parsing error: {}", value, e);
        }
    }
}
