package mspbots.next.ticket.core.data.tickets;

import lombok.Data;
import org.reactivestreams.Publisher;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Externalized configuration for ticket-request backpressure tuning, bound from the
 * {@code tickets.request} prefix (e.g. {@code tickets.request.limit-request},
 * {@code tickets.request.limit-rate.high-tide}).
 * <p>
 * NOTE(review): the original field Javadoc was copied verbatim from Reactor's
 * {@code Flux#limitRequest(long)} and {@code Flux#limitRate(int, int)} operator docs
 * (including {@code @param}/{@code @return} tags that are invalid on fields); it has been
 * rewritten here to document the properties themselves. These values are presumably fed
 * into those operators by the consuming code — confirm against the callers.
 *
 * @author <a href="https://github.com/vnobo">Alex bob</a>
 * @date Created by 2020/12/9
 */
@Data
@ConfigurationProperties("tickets.request")
public class TicketsProperties {

    // Nested batch/replenish sizes for rate limiting; see LimitRate below.
    private LimitRate limitRate = new LimitRate();

    /**
     * Cap on the total number of elements requested from the upstream {@link Publisher}
     * over the life of a subscription (a global request limit, in the sense of Reactor's
     * {@code Flux#limitRequest(long)}). Defaults to {@code 100}.
     */
    private long limitRequest = 100;

    /**
     * Rate-limiting batch sizes in the sense of Reactor's
     * {@code Flux#limitRate(int highTide, int lowTide)}: downstream demand is split into
     * batches capped at {@code highTide} first, then replenished in batches of
     * {@code lowTide}.
     */
    @Data
    public static class LimitRate {
        /** Initial request (prefetch) batch size. Defaults to {@code 10}. */
        private int highTide = 10;
        /**
         * Subsequent (replenishing) request batch size; per the Reactor contract,
         * {@code 0} disables early replenishing and a value equal to {@code highTide}
         * reverts to the default 75% replenish strategy. Defaults to {@code 5}.
         */
        private int lowTide = 5;
    }


}
