package com.booter.webflux.filter;

import lombok.extern.slf4j.Slf4j;
import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.http.HttpMethod;
import org.springframework.http.MediaType;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpRequestDecorator;
import org.springframework.util.StringUtils;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

import java.util.Optional;
import java.util.stream.Collectors;

/**
 * Request decorator that caches a logging-instrumented view of the request body.
 * <p>
 * When the request's {@code Content-Type} is one of {@code LogUtils.legalLogMediaTypes},
 * the body flux is re-published on an elastic scheduler and each {@link DataBuffer} is
 * passed through {@code LogUtils.loggingRequest} so the payload can be logged; otherwise
 * the delegate's body is returned untouched.
 * <p>
 * Scheduler background (see <a href="https://www.jianshu.com/p/769f6e9824fb">this article</a>):
 * a Reactor {@code Scheduler} is roughly a thread pool. {@code Schedulers.elastic()} grows
 * unboundedly like {@code Executors.newCachedThreadPool()}; {@code Schedulers.parallel()}
 * is fixed at the CPU core count. {@code publishOn} switches the scheduler for operators
 * <em>downstream</em> of it, while {@code subscribeOn} affects the whole chain from the
 * source regardless of where it appears — which is why the logging {@code map} below is
 * placed after {@code publishOn}.
 */
@Slf4j
public class RequestBodyServerHttpRequestDecorator extends ServerHttpRequestDecorator {

    /** Body flux exposed to downstream consumers; built once in the constructor. */
    private final Flux<DataBuffer> body;

    RequestBodyServerHttpRequestDecorator(ServerHttpRequest delegate) {
        super(delegate);
        // Build the (potentially large) header dump only when debug logging is on;
        // these are pure string computations, so guarding them changes no behavior.
        if (log.isDebugEnabled()) {
            final String path = delegate.getURI().getPath();
            final String query = delegate.getURI().getQuery();
            final String method = Optional.ofNullable(delegate.getMethod()).orElse(HttpMethod.GET).name();
            final String headers = delegate.getHeaders().entrySet()
                    .stream()
                    .map(entry -> "            " + entry.getKey() + ": [" + String.join(";", entry.getValue()) + "]")
                    .collect(Collectors.joining("\n"));
            log.debug("HttpMethod : {}\n" +
                    "Uri        : {}\n" +
                    "Headers    : \n" +
                    "{}", method, (query == null || query.isEmpty()) ? path : path + "?" + query, headers);
        }
        final MediaType contentType = delegate.getHeaders().getContentType();
        final Flux<DataBuffer> delegateBody = super.getBody();
        if (LogUtils.legalLogMediaTypes.contains(contentType)) {
            // Hop off the event loop before logging the buffers so slow logging
            // cannot stall NIO threads.
            // NOTE(review): Schedulers.elastic() is deprecated since Reactor 3.4
            // (removed in 3.6) in favor of Schedulers.boundedElastic() — migrate
            // once the project's Reactor version is confirmed to be >= 3.3.
            body = delegateBody
                    .publishOn(Schedulers.elastic())
                    .map(dataBuffer -> LogUtils.loggingRequest(log, dataBuffer));
        } else {
            body = delegateBody;
        }
    }

    /**
     * @return the cached body flux (instrumented for logging when the content
     * type is loggable); safe to return repeatedly since it wraps the same source.
     */
    @Override
    public Flux<DataBuffer> getBody() {
        return body;
    }

}