package org.jetlinks.pro.cassandra.metadata;

import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.DriverException;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import lombok.EqualsAndHashCode;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections.CollectionUtils;
import org.hswebframework.ezorm.core.CastUtil;
import org.hswebframework.ezorm.rdb.executor.BatchSqlRequest;
import org.hswebframework.ezorm.rdb.executor.DefaultColumnWrapperContext;
import org.hswebframework.ezorm.rdb.executor.NullValue;
import org.hswebframework.ezorm.rdb.executor.SqlRequest;
import org.hswebframework.ezorm.rdb.executor.reactive.ReactiveSqlExecutor;
import org.hswebframework.ezorm.rdb.executor.wrapper.ColumnWrapperContext;
import org.hswebframework.ezorm.rdb.executor.wrapper.ResultWrapper;
import org.reactivestreams.Publisher;
import org.springframework.data.cassandra.ReactiveResultSet;
import org.springframework.data.cassandra.ReactiveSession;
import org.springframework.data.cassandra.core.cql.CqlProvider;
import org.springframework.data.cassandra.core.cql.ReactiveCqlOperations;
import org.springframework.data.cassandra.core.cql.ReactivePreparedStatementCreator;
import org.springframework.util.ConcurrentReferenceHashMap;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import javax.annotation.Nonnull;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

@Slf4j
public class CassandraReactiveSqlExecutor implements ReactiveSqlExecutor {

    private final ReactiveCqlOperations operations;

    /**
     * Prepared-statement creators keyed by CQL text, so each distinct statement
     * is prepared at most once per executor (subject to the map's own eviction,
     * if the caller supplies an evicting map).
     */
    private final Map<String, StatementCreator> statementCache;

    public CassandraReactiveSqlExecutor(ReactiveCqlOperations operations) {
        this(operations, new ConcurrentHashMap<>());
    }

    public CassandraReactiveSqlExecutor(ReactiveCqlOperations operations, Map<String, StatementCreator> statementCache) {
        this.operations = operations;
        this.statementCache = statementCache;
    }

    /**
     * A {@link ReactivePreparedStatementCreator} that prepares its CQL lazily and
     * caches the resulting {@link PreparedStatement} for reuse.
     * <p>
     * Failed preparations are deliberately NOT kept: on error the cache slot is
     * cleared so the next invocation retries the prepare instead of replaying a
     * cached (possibly transient) failure forever.
     */
    @EqualsAndHashCode(of = "cql")
    public static class StatementCreator implements ReactivePreparedStatementCreator, CqlProvider {
        @SuppressWarnings("all")
        private final static AtomicReferenceFieldUpdater<StatementCreator, Mono> CACHED = AtomicReferenceFieldUpdater
            .newUpdater(StatementCreator.class, Mono.class, "cached");

        private final String cql;

        private volatile Mono<PreparedStatement> cached;

        public StatementCreator(String cql) {
            this.cql = cql;
        }

        @Override
        @Nonnull
        public Mono<PreparedStatement> createPreparedStatement(@Nonnull ReactiveSession session) throws DriverException {
            Mono<PreparedStatement> current = cached;
            if (current == null) {
                Mono<PreparedStatement> newStatement = session
                    .prepare(cql)
                    // once resolved, swap in an already-completed mono to skip the cache() machinery
                    .doOnNext(statement -> cached = Mono.just(statement))
                    // don't cache failures: clear the slot so a later call re-prepares
                    .doOnError(error -> CACHED.set(this, null))
                    .cache();
                // publish atomically; if a concurrent caller won the race, reuse its
                // mono rather than keeping two competing prepares alive
                if (!CACHED.compareAndSet(this, null, newStatement)) {
                    Mono<PreparedStatement> winner = cached;
                    return winner != null ? winner : newStatement;
                }
                return newStatement;
            }
            return current;
        }

        @Nonnull
        @Override
        public String getCql() {
            return cql;
        }
    }

    /**
     * Executes every {@link SqlRequest} in {@code request} (batches flattened via
     * {@link #toFlux(Publisher)}) against Cassandra and maps each result set
     * through {@code mapper}.
     * <p>
     * If a {@link CassandraPagingContext} is active for the request, the bound
     * statement is configured with that context's page size and paging state;
     * otherwise a plain bind is used.
     *
     * @param mapper turns (request, result set) into the emitted elements
     */
    protected <T> Flux<T> doExecute(Publisher<SqlRequest> request,
                                    BiFunction<SqlRequest, ReactiveResultSet, Publisher<T>> mapper) {
        return this
            .toFlux(request)
            .concatMap(sql -> {
                StatementCreator statement = statementCache
                    .computeIfAbsent(sql.getSql(), StatementCreator::new);
                return operations
                    .execute(statement,
                             (session, ps) -> CassandraPagingContext
                                 .current(sql)
                                 .map(ctx -> session
                                     .execute(doBind(ps.boundStatementBuilder()
                                                       .setPageSize(ctx.getRealPageSize())
                                                       .setPagingState(ctx.getPageState())
                                                       .build(), sql.getParameters())
                                     ))
                                 .defaultIfEmpty(
                                     Mono.defer(() -> session
                                         .execute(doBind(ps.bind(), sql.getParameters())))
                                 )
                                 .flatMap(Function.identity())
                                 .doOnNext(this::handleResultSet)
                                 .flatMapMany(result -> mapper.apply(sql, result))
                    );

            });
    }

    /**
     * Binds positional parameters onto {@code statement}. Both raw {@code null}
     * and ezorm's {@link NullValue} placeholder are bound as CQL NULL; every
     * other value is bound using its runtime class for codec lookup.
     * (BoundStatement is immutable, hence the reassignment per set call.)
     */
    @SuppressWarnings("unchecked") // runtime-class cast is safe: codec lookup by the value's own class
    private BoundStatement doBind(BoundStatement statement, Object... args) {
        for (int i = 0; i < args.length; i++) {
            Object val = args[i];
            if (val == null || val instanceof NullValue) {
                statement = statement.setToNull(i);
            } else {
                statement = statement.set(i, val, (Class<Object>) val.getClass());
            }
        }
        return statement;
    }

    /**
     * Surfaces any server-side warnings attached to the result set's execution info.
     */
    private void handleResultSet(ReactiveResultSet resultSet) {
        List<String> warnings = resultSet.getExecutionInfo().getWarnings();
        if (CollectionUtils.isNotEmpty(warnings)) {
            log.info("cassandra query warnings:{}", warnings);
        }
    }

    /**
     * Executes the given requests and counts one per statement executed.
     * Note: completes empty (no value) when {@code request} yields no statements,
     * mirroring {@link Flux#reduce(BiFunction)} semantics.
     */
    @Override
    public Mono<Integer> update(Publisher<SqlRequest> request) {

        return this
            .doExecute(request, (sql, resultSet) -> Mono.just(1))
            .reduce(Math::addExact);
    }

    /**
     * Executes the given requests, discarding all results.
     */
    @Override
    public Mono<Void> execute(Publisher<SqlRequest> request) {
        return this
            .doExecute(request, (sql, resultSet) -> Mono.empty())
            .then();
    }

    /**
     * Executes the given requests and wraps each row through {@code wrapper}.
     * When a paging context is active, its next-page state is captured from the
     * result set and only the currently-available rows (minus the configured
     * skip) are consumed; otherwise all rows are streamed. Row wrapping stops
     * early when {@code wrapper.completedWrapRow} returns {@code false}.
     */
    @Override
    public <E> Flux<E> select(Publisher<SqlRequest> request, ResultWrapper<E, ?> wrapper) {
        return this
            .doExecute(request, (sql, resultSet) -> {
                List<String> columns = StreamSupport
                    .stream(resultSet.getColumnDefinitions().spliterator(), false)
                    .map(ColumnDefinition::getName)
                    .map(CqlIdentifier::asInternal)
                    .collect(Collectors.toList());
                wrapper.beforeWrap(() -> columns);
                return CassandraPagingContext
                    .current(sql)
                    .doOnNext(ctx -> ctx.setNextPageState(resultSet.getExecutionInfo().getPagingState()))
                    // paging: consume only the rows already fetched for this page
                    .map(ctx -> {
                        if (log.isDebugEnabled() && ctx.getFetchSkip() > 0) {
                            log.debug("fetch total {},skip {}",
                                      ctx.getRealPageSize(),
                                      ctx.getFetchSkip());
                        }
                        return resultSet.availableRows().skip(ctx.getFetchSkip());
                    })
                    // no paging: stream every row
                    .defaultIfEmpty(Flux.defer(resultSet::rows))
                    .flatMapMany(Function.identity())
                    .map(row -> {
                        ColumnDefinitions defs = row.getColumnDefinitions();
                        E e = wrapper.newRowInstance();
                        for (int i = 0, size = defs.size(); i < size; i++) {
                            ColumnWrapperContext<E> context = new DefaultColumnWrapperContext<>(i, defs
                                .get(i)
                                .getName()
                                .asInternal(), row.getObject(i), e);

                            wrapper.wrapColumn(context);
                            e = context.getRowInstance();
                        }
                        if (!wrapper.completedWrapRow(e)) {
                            // sentinel: wrapper asked to stop consuming rows
                            return Interrupted.instance;
                        }
                        return e;
                    })
                    .takeWhile(Interrupted::nonInterrupted)
                    .doOnComplete(wrapper::completedWrap)
                    .map(CastUtil::<E>cast);
            });
    }

    /**
     * Marker emitted in place of a row to signal early termination of the
     * row stream; filtered out by {@code takeWhile(Interrupted::nonInterrupted)}.
     */
    enum Interrupted {
        instance;

        static boolean nonInterrupted(Object o) {
            return o != instance;
        }
    }

    /**
     * Normalizes the request publisher into a flat {@link Flux}: a
     * {@link BatchSqlRequest} is expanded into itself followed by its batch
     * entries, and empty requests are dropped.
     */
    protected Flux<SqlRequest> toFlux(Publisher<SqlRequest> request) {

        return Flux
            .from(request)
            .flatMap(sql -> {
                if (sql instanceof BatchSqlRequest) {
                    return Flux.concat(Flux.just(sql), Flux.fromIterable(((BatchSqlRequest) sql).getBatch()));
                }
                return Flux.just(sql);
            })
            .filter(SqlRequest::isNotEmpty);
    }

}
