package com.example.performance.r2dbc.compare;

import com.example.performance.r2dbc.entity.JDBCRowMapper;
import com.example.performance.r2dbc.entity.RecordDb;
import com.example.performance.r2dbc.utils.PerformanceCollector;
import com.example.performance.r2dbc.utils.TestSuite;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.reactivestreams.Publisher;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DataSourceUtils;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.StopWatch;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.scheduler.Schedulers;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.BaseStream;
import java.util.stream.Stream;

import static com.example.performance.r2dbc.utils.TestSuite.prepareSerialTestSuite;

/**
 * @author zpMeng
 * @since 6/30/2025
 */
@Slf4j
@Service
@RequiredArgsConstructor
@Transactional(readOnly = true)
public class JdbcStreamingService {

    private final JdbcTemplate jdbcTemplate;
    private final StreamService streamService;

    /**
     * Runs the serial streaming benchmark.
     *
     * <p>For every {@code nodeSize -> repetitions} pair produced by
     * {@link TestSuite#prepareSerialTestSuite()}, streams all matching rows, counts them,
     * and records the wall-clock duration of each repetition via {@link PerformanceCollector}.
     */
    public void testStreamQuerySerial() {
        Map<Long, Integer> nodeSizeToCount = prepareSerialTestSuite();
        log.info("serial test suite: {}", nodeSizeToCount);

        for (Map.Entry<Long, Integer> entry : nodeSizeToCount.entrySet()) {
            Long nodeSize = entry.getKey();
            Integer testTimes = entry.getValue();

            ArrayList<Double> durations = new ArrayList<>();
            for (int i = 0; i < testTimes; i++) {
                StopWatch stopWatch = new StopWatch();
                stopWatch.start();

                // limitRate(500) keeps downstream demand bounded so the driver
                // fetches in batches instead of buffering the whole result set.
                Long totalCount = executeOneFluxQueryByStream(0, nodeSize)
                        .limitRate(500)
                        .count()
                        .subscribeOn(Schedulers.boundedElastic())
                        .block();
                log.info("totalCount: {}", totalCount);

                stopWatch.stop();

                durations.add(stopWatch.getTotalTimeSeconds());
            }

            PerformanceCollector.printSerial(Map.of(nodeSize, durations));
        }
    }

    /**
     * Runs the parallel streaming benchmark for every concurrency level in the suite.
     *
     * @param start    lower bound handed to {@link TestSuite#prepareParallelTestSuite}
     * @param end      upper bound handed to {@link TestSuite#prepareParallelTestSuite}
     * @param nodeSize per-query row-count bound (node_id filter)
     * @throws Exception if a worker task fails or the thread is interrupted while waiting
     */
    public void testStreamQueryParallel(int start, int end, int nodeSize) throws Exception {
        Map<Integer, Integer> concurrencyToCount = TestSuite.prepareParallelTestSuite(start, end, nodeSize);
        log.info("parallel test suite: {}", concurrencyToCount);

        for (Map.Entry<Integer, Integer> entry : concurrencyToCount.entrySet()) {
            Integer concurrency = entry.getKey();
            Integer testTimes = entry.getValue();

            executeOneParallelTest(concurrency, testTimes, nodeSize);
        }
    }

    /**
     * Executes one parallel benchmark round: {@code concurrency} tasks stream the data set
     * simultaneously, repeated {@code testTimes} times; all per-task durations are aggregated.
     *
     * @param concurrency number of concurrent streaming queries per round
     * @param testTimes   number of rounds to run
     * @param nodeSize    per-query row-count bound (node_id filter)
     * @throws Exception if a task fails ({@link Future#get()}) or the pool shutdown is interrupted
     */
    public void executeOneParallelTest(int concurrency, int testTimes, long nodeSize) throws Exception {
        ArrayList<Double> totalDurations = new ArrayList<>();

        for (int i = 0; i < testTimes; i++) {
            // ExecutorService is AutoCloseable since Java 19: close() waits for task termination.
            try (ExecutorService executorService = Executors.newFixedThreadPool(concurrency)) {
                List<Future<Double>> futures = new ArrayList<>(concurrency);

                for (int j = 0; j < concurrency; j++) {
                    // Spread the workers across the 20 sharded tables (DS_AGG_427_0..19).
                    int version = j % 20;
                    Future<Double> futureTask = executorService
                            .submit(() -> {
                                StopWatch stopWatch = new StopWatch();
                                stopWatch.start();
                                executeOneFluxQueryByStream(version, nodeSize)
                                        .subscribeOn(Schedulers.boundedElastic())
                                        .blockLast();
                                stopWatch.stop();
                                return stopWatch.getTotalTimeSeconds();
                            });
                    futures.add(futureTask);
                }

                // Future.get() blocks until each task completes, so no extra latch is needed.
                for (Future<Double> future : futures) {
                    totalDurations.add(future.get());
                }
            }
        }

        PerformanceCollector.printParallel(totalDurations, concurrency);
    }

    /**
     * Streams all rows of {@code DS_AGG_427_<version>} with {@code node_id <= nodeSize}
     * through {@link StreamService}.
     *
     * <p>The SQL is assembled with {@code %d} placeholders only, so both interpolated values
     * are guaranteed numeric (no injection surface); the table name itself cannot be bound
     * as a JDBC parameter.
     *
     * <p>NOTE(review): the original comment here was just "OOM" — presumably this variant was
     * observed to exhaust memory under load; confirm against the benchmark notes.
     *
     * @param version  shard suffix of the table (0..19)
     * @param nodeSize inclusive upper bound on node_id
     * @return a lazy {@link Flux} of mapped rows
     */
    public Flux<RecordDb> executeOneFluxQueryByStream(int version, Long nodeSize) {
        String sql = "select * from DS_AGG_427_%d where node_id <= %d".formatted(version, nodeSize);
        return streamService.execute(sql);
    }

    /**
     * Variant 2: wraps {@link JdbcTemplate#queryForStream} in {@code Flux.using}.
     *
     * <p>Fix: the stream is now acquired lazily inside the {@code Flux.using} resource
     * supplier. The original opened it eagerly at assembly time, which leaked the underlying
     * Statement/ResultSet whenever the returned Flux was never subscribed, and defeated
     * Reactor's lazy-subscription semantics.
     *
     * <p>NOTE(review): original comment was "OOM" — presumably observed to exhaust memory;
     * confirm against the benchmark notes.
     */
    public Flux<RecordDb> executeOneFluxQueryMethod2(int version, long nodeSize) {
        String sql = "select * from DS_AGG_427_%d where node_id <= %d".formatted(version, nodeSize);
        return Flux.using(
                () -> jdbcTemplate.queryForStream(sql, new JDBCRowMapper()), // acquired per subscription
                Flux::fromStream,                                            // adapt Stream -> Flux
                BaseStream::close                                            // closes the backing ResultSet
        );
    }

    /**
     * Variant 3: manual cursor streaming over the transaction-bound connection via
     * {@code Flux.create}, emitting one mapped row at a time.
     *
     * <p>Fix: the original allocated a new {@link JDBCRowMapper} and called
     * {@code mapRow(rs, 0)} <em>twice</em> for every row, discarding the first result;
     * the mapper is now created once and each row is mapped exactly once.
     *
     * <p>NOTE(review): original comment was "OOM" — presumably observed to exhaust memory;
     * confirm against the benchmark notes.
     */
    public Flux<RecordDb> executeOneFluxQueryMethod3(int version, long nodeSize) {
        String sql = "select * from DS_AGG_427_%d where node_id <= %d".formatted(version, nodeSize);
        AtomicReference<Connection> con = new AtomicReference<>();
        return Flux.defer(() -> {
                    // Borrow the (possibly transactional) connection; must happen on the
                    // subscribing thread so DataSourceUtils sees the right transaction context.
                    con.set(DataSourceUtils.getConnection(jdbcTemplate.getDataSource()));

                    return Flux.create((Consumer<FluxSink<RecordDb>>) fluxSink -> {
                                try (PreparedStatement ps = createStreamingStatement(con.get(), sql);
                                     ResultSet rs = ps.executeQuery()) {

                                    JDBCRowMapper rowMapper = new JDBCRowMapper();
                                    while (!fluxSink.isCancelled() && rs.next()) {
                                        // Map row-by-row; never buffer the whole result set.
                                        fluxSink.next(rowMapper.mapRow(rs, 0));
                                    }
                                    fluxSink.complete();
                                } catch (SQLException e) {
                                    fluxSink.error(e);
                                }
                            }
                    );
                })
                // Run the blocking JDBC loop on the bounded-elastic pool, not an event loop.
                .subscribeOn(Schedulers.boundedElastic())
                // Return the connection to the transaction manager / pool on any terminal signal.
                .doFinally(signal -> DataSourceUtils.releaseConnection(con.get(), jdbcTemplate.getDataSource()));
    }

    /**
     * Variant 4: {@code Flux.using} with an explicitly managed connection, statement and
     * result set.
     *
     * <p>Fix: the {@code rs != null} / {@code stmt != null} guards in the cleanup path were
     * dead code (both references are assigned from non-null-returning calls before the lambda
     * runs); the duplicated close-and-log logic is extracted to {@link #closeQuietly}.
     *
     * <p>NOTE(review): original comment was "OOM" — presumably observed to exhaust memory;
     * confirm against the benchmark notes.
     */
    public Flux<RecordDb> executeOneFluxQueryMethod4(int version, long nodeSize) {
        String sql = "select * from DS_AGG_427_%d where node_id <= %d".formatted(version, nodeSize);
        JDBCRowMapper rowMapper = new JDBCRowMapper();
        return Flux.using(
                        // 1. Resource acquisition: borrow a pooled / transactional connection.
                        () -> DataSourceUtils.getConnection(jdbcTemplate.getDataSource()),

                        // 2. Resource usage: cursor-stream the result set row by row.
                        (Function<Connection, Publisher<RecordDb>>) connection -> {
                            try {
                                PreparedStatement stmt = createStreamingStatement(connection, sql);
                                ResultSet rs = stmt.executeQuery();

                                return Flux.create(sink -> {
                                    try {
                                        while (!sink.isCancelled() && rs.next()) {
                                            sink.next(rowMapper.mapRow(rs, 0));
                                        }
                                        sink.complete();
                                    } catch (SQLException e) {
                                        sink.error(new RuntimeException("Streaming failed", e));
                                    } finally {
                                        // Always release JDBC resources, even on cancel/error.
                                        closeQuietly(rs);
                                        closeQuietly(stmt);
                                    }
                                });
                            } catch (SQLException e) {
                                throw new RuntimeException("Query preparation failed", e);
                            }
                        },

                        // 3. Resource cleanup: hand the connection back to the pool.
                        connection -> DataSourceUtils.releaseConnection(connection, jdbcTemplate.getDataSource()),

                        // 4. Eager cleanup: run the cleanup even when the stream is cancelled.
                        true
                )
                .subscribeOn(Schedulers.boundedElastic());
    }

    /** Closes a JDBC resource, logging (never propagating) any failure. */
    private static void closeQuietly(AutoCloseable resource) {
        try {
            resource.close();
        } catch (Exception e) {
            log.warn("JDBC resource close error", e);
        }
    }

    /**
     * Creates a forward-only, read-only statement configured for cursor-based streaming
     * (PostgreSQL / Oracle style fetch-size driven paging).
     *
     * @param con open JDBC connection
     * @param sql query to prepare
     * @return the prepared streaming statement (caller is responsible for closing it)
     * @throws SQLException if preparation fails
     */
    private PreparedStatement createStreamingStatement(Connection con, String sql) throws SQLException {
        PreparedStatement ps = con.prepareStatement(
                sql,
                ResultSet.TYPE_FORWARD_ONLY,
                ResultSet.CONCUR_READ_ONLY
        );
        ps.setFetchSize(100); // rows fetched per round trip
        ps.setFetchDirection(ResultSet.FETCH_FORWARD);
        return ps;
    }
}
