package cc.magicjson.easy.batch.core;

import cc.magicjson.easy.batch.config.BatchSqlProvider;
import cc.magicjson.easy.batch.exception.BatchOperationException;
import cc.magicjson.easy.batch.model.BatchResult;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.support.TransactionTemplate;
import org.springframework.util.CollectionUtils;

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

/**
 * 增强的批量操作工具类
 * 支持：
 * 1. 注解式事务和编程式事务
 * 2. 自动分批处理
 * 3. 批处理进度监控
 * 4. 异常重试机制
 * 5. 并行处理支持
 */
@Component
@Slf4j
public class BatchOperations {
    private final JdbcTemplate jdbcTemplate;
    private final TransactionTemplate transactionTemplate;
    private final BatchSqlProvider sqlProvider;

    @Value("${batch.operations.default-size:1000}")
    private int defaultBatchSize;

    @Value("${batch.operations.max-size:5000}")
    private int maxBatchSize;

    @Value("${batch.operations.retry.max-attempts:3}")
    private int maxRetryAttempts;

    @Value("${batch.operations.retry.delay-ms:1000}")
    private long retryDelayMs;

    public BatchOperations(JdbcTemplate jdbcTemplate,
                           PlatformTransactionManager transactionManager,
                           BatchSqlProvider sqlProvider) {
        this.jdbcTemplate = jdbcTemplate;
        this.transactionTemplate = new TransactionTemplate(transactionManager);
        this.sqlProvider = sqlProvider;
    }

    /**
     * 使用注解式事务的批量操作
     */
    @Transactional(rollbackFor = Exception.class)
    public <T> BatchResult<T> executeBatchWithAnnotation(
        List<T> items,
        String sqlKey,
        BatchParameterExtractor<T> parameterExtractor) {
        return doBatchOperation(items, sqlKey, parameterExtractor, false);
    }

    /**
     * 使用编程式事务的批量操作
     */
    public <T> BatchResult<T> executeBatchWithProgrammatic(
        List<T> items,
        String sqlKey,
        BatchParameterExtractor<T> parameterExtractor) {
        return transactionTemplate.execute(status -> {
            try {
                return doBatchOperation(items, sqlKey, parameterExtractor, false);
            } catch (Exception e) {
                status.setRollbackOnly();
                throw new BatchOperationException("Batch operation failed", e);
            }
        });
    }

    /**
     * 大数据量分批处理（支持进度监控）
     */
    public <T> BatchResult<T> executeLargeBatch(
        List<T> items,
        String sqlKey,
        BatchParameterExtractor<T> parameterExtractor,
        BatchProgressCallback progressCallback) {

        int optimalBatchSize = calculateOptimalBatchSize(items.size());
        List<List<T>> batches = Lists.partition(items, optimalBatchSize);
        BatchResult<T> totalResult = new BatchResult<>();

        for (int i = 0; i < batches.size(); i++) {
            List<T> batch = batches.get(i);
            BatchResult<T> batchResult = executeBatchWithRetry(batch, sqlKey, parameterExtractor);
            totalResult.merge(batchResult);

            // 回调进度
            if (progressCallback != null) {
                float progress = (i + 1) * 100f / batches.size();
                progressCallback.onProgress(progress, totalResult.getAffectedRows());
            }
        }

        return totalResult;
    }

    /**
     * 并行批量处理（适用于大数据量）
     */
    public <T> BatchResult<T> executeParallelBatch(
        List<T> items,
        String sqlKey,
        BatchParameterExtractor<T> parameterExtractor,
        int threadCount) {

        int optimalBatchSize = calculateOptimalBatchSize(items.size());
        List<List<T>> batches = Lists.partition(items, optimalBatchSize);

        ExecutorService executor = Executors.newFixedThreadPool(threadCount);
        try {
            List<CompletableFuture<BatchResult<T>>> futures = batches.stream()
                .map(batch -> CompletableFuture.supplyAsync(() ->
                    executeBatchWithRetry(batch, sqlKey, parameterExtractor), executor))
                .collect(Collectors.toList());

            BatchResult<T> totalResult = new BatchResult<>();
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenAccept(v -> futures.forEach(f -> totalResult.merge(f.join())))
                .join();

            return totalResult;
        } finally {
            executor.shutdown();
        }
    }

    /**
     * 带重试机制的批量操作
     */
    private <T> BatchResult<T> executeBatchWithRetry(
        List<T> items,
        String sqlKey,
        BatchParameterExtractor<T> parameterExtractor) {

        int attempts = 0;
        while (attempts < maxRetryAttempts) {
            try {
                return doBatchOperation(items, sqlKey, parameterExtractor, true);
            } catch (Exception e) {
                attempts++;
                if (attempts >= maxRetryAttempts) {
                    throw new BatchOperationException(
                        "Batch operation failed after " + attempts + " attempts", e);
                }
                log.warn("Batch operation attempt {} failed, retrying...", attempts, e);
                try {
                    Thread.sleep(retryDelayMs * attempts);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new BatchOperationException("Retry interrupted", ie);
                }
            }
        }
        throw new BatchOperationException("Batch operation failed after max retries");
    }

    /**
     * 核心批处理操作
     */
    private <T> BatchResult<T> doBatchOperation(
        List<T> items,
        String sqlKey,
        BatchParameterExtractor<T> parameterExtractor,
        boolean isRetry) {

        if (CollectionUtils.isEmpty(items)) {
            return new BatchResult<>();
        }

        String sql = sqlProvider.getSql(sqlKey);
        BatchResult<T> result = new BatchResult<>();
        result.setTotalItems(items.size());

        try {
            int[] updateCounts = jdbcTemplate.batchUpdate(sql,
                new BatchPreparedStatementSetter() {
                    @Override
                    public void setValues(PreparedStatement ps, int i) throws SQLException {
                        Object[] params = parameterExtractor.extractParameters(items.get(i));
                        for (int j = 0; j < params.length; j++) {
                            ps.setObject(j + 1, params[j]);
                        }
                    }

                    @Override
                    public int getBatchSize() {
                        return items.size();
                    }
                });

            result.setAffectedRows(Arrays.stream(updateCounts).sum());
            result.setSuccessful(true);

        } catch (Exception e) {
            result.setSuccessful(false);
            result.setErrorMessage(e.getMessage());
            if (!isRetry) {
                throw new BatchOperationException("Batch operation failed", e);
            }
        }

        return result;
    }

    /**
     * 计算最优批次大小
     */
    private int calculateOptimalBatchSize(int dataSize) {
        int optimalSize = Math.min(defaultBatchSize, dataSize);
        return Math.min(optimalSize, maxBatchSize);
    }
}
