package demo.flink.program.rest;

import demo.flink.program.ClusterClientDemo;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.JobStatus;
import org.apache.flink.api.common.cache.DistributedCache;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.client.program.rest.RestClusterClientConfiguration;
import org.apache.flink.client.program.rest.retry.ExponentialWaitStrategy;
import org.apache.flink.client.program.rest.retry.WaitStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.client.JobStatusMessage;
import org.apache.flink.runtime.client.JobSubmissionException;
import org.apache.flink.runtime.concurrent.FutureUtils;
import org.apache.flink.runtime.concurrent.ScheduledExecutorServiceAdapter;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.OperatorID;
import org.apache.flink.runtime.jobmaster.JobResult;
import org.apache.flink.runtime.messages.Acknowledge;
import org.apache.flink.runtime.operators.coordination.CoordinationRequest;
import org.apache.flink.runtime.operators.coordination.CoordinationResponse;
import org.apache.flink.runtime.rest.FileUpload;
import org.apache.flink.runtime.rest.RestClient;
import org.apache.flink.runtime.rest.messages.*;
import org.apache.flink.runtime.rest.messages.job.JobExecutionResultHeaders;
import org.apache.flink.runtime.rest.messages.job.JobSubmitHeaders;
import org.apache.flink.runtime.rest.messages.job.JobSubmitRequestBody;
import org.apache.flink.runtime.rest.messages.job.JobSubmitResponseBody;
import org.apache.flink.runtime.rest.messages.queue.AsynchronouslyCreatedResource;
import org.apache.flink.runtime.rest.messages.queue.QueueStatus;
import org.apache.flink.runtime.rest.util.RestClientException;
import org.apache.flink.runtime.rest.util.RestConstants;
import org.apache.flink.runtime.util.ExecutorThreadFactory;
import org.apache.flink.runtime.webmonitor.retriever.LeaderRetriever;
import org.apache.flink.shaded.netty4.io.netty.channel.ConnectTimeoutException;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpResponseStatus;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.function.CheckedSupplier;

import javax.annotation.Nullable;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.*;
import java.util.function.Predicate;
import java.util.function.Supplier;

/**
 * Demo client that talks to a Flink cluster through its REST endpoint.
 *
 * <p>It serializes a {@link JobGraph} to a temporary file, uploads it (together with user jars
 * and local user artifacts) via {@link RestClient}, and polls asynchronously created resources
 * (e.g. the job execution result) with an exponential back-off.
 *
 * <p>Several {@code ClusterClientDemo} operations are intentionally unimplemented stubs that
 * return {@code null}; only job submission and result polling are wired up.
 *
 * @param <T> type of the cluster id
 */
public class RestClusterClientDemo<T> implements ClusterClientDemo {

    private final RestClusterClientConfiguration restClusterClientConfiguration;
    private final Configuration configuration;
    private final RestClient restClient;
    private final T clusterId;

    /** Back-off for re-polling asynchronously created resources: starts at 10ms, capped at 2s. */
    private final WaitStrategy waitStrategy = new ExponentialWaitStrategy(10L, 2000L);

    /** Executor for potentially blocking work (JobGraph serialization, URL resolution). */
    private final ExecutorService executorService =
            Executors.newFixedThreadPool(4, new ExecutorThreadFactory("Flink-RestClusterClient-IO"));

    /** Executor that drives request retries and poll delays. */
    private final ScheduledExecutorService retryExecutorService =
            Executors.newSingleThreadScheduledExecutor(new ExecutorThreadFactory("Flink-RestClusterClient-Retry"));

    /** Retrieves the current web monitor leader address; resolved lazily in {@link #getWebMonitorBaseUrl()}. */
    private final LeaderRetriever webMonitorLeaderRetriever = new LeaderRetriever();

    public RestClusterClientDemo(RestClusterClientConfiguration restClusterClientConfiguration, Configuration configuration, RestClient restClient, T clusterId) {
        this.restClusterClientConfiguration = restClusterClientConfiguration;
        this.configuration = configuration;
        this.restClient = restClient;
        this.clusterId = clusterId;
    }

    /**
     * Shuts down the executors owned by this client. Previously this method was empty, which
     * leaked the two non-daemon thread pools and kept the JVM alive after {@code close()}.
     *
     * <p>NOTE(review): {@code restClient} is injected by the caller, so ownership is unclear —
     * it is deliberately not closed here; confirm with the caller's lifecycle.
     */
    @Override
    public void close() {
        retryExecutorService.shutdownNow();
        executorService.shutdownNow();
    }

    @Override
    public Object getClusterId() {
        return this.clusterId;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public Configuration getFlinkConfiguration() {
        return null;
    }

    /** Not implemented in this demo; no-op. */
    @Override
    public void shutDownCluster() {

    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public String getWebInterfaceURL() {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<Collection<JobStatusMessage>> listJobs() throws Exception {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) throws FlinkException {
        return null;
    }

    /**
     * Submits the given {@link JobGraph} to the cluster via the REST endpoint.
     *
     * <p>Steps: (1) serialize the graph to a temp file, (2) assemble the submit request plus
     * the files to upload (graph, user jars, local user artifacts), (3) send the request with
     * retries on connection problems / 503, (4) best-effort delete of the temp file whether
     * or not submission succeeded.
     *
     * @param jobGraph the graph to submit
     * @return future with the job id; fails with {@link JobSubmissionException} on any error
     */
    @Override
    public CompletableFuture<JobID> submitJob(JobGraph jobGraph) {

        // (1) Serialize the JobGraph off the calling thread.
        CompletableFuture<Path> jobGraphFileFuture = CompletableFuture.supplyAsync(() -> {
            try {
                Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin");
                try (ObjectOutputStream objectOut = new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) {
                    objectOut.writeObject(jobGraph);
                }
                return jobGraphFile;
            } catch (IOException e) {
                throw new CompletionException(new FlinkException("Failed to serialize JobGraph.", e));
            }
        }, executorService);

        // (2) Build the request body and collect every file that must be uploaded.
        CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture = jobGraphFileFuture.thenApply(jobGraphFile -> {
            List<String> jarFileNames = new ArrayList<>(8);
            List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>(8);
            Collection<FileUpload> filesToUpload = new ArrayList<>(8);

            filesToUpload.add(new FileUpload(jobGraphFile, RestConstants.CONTENT_TYPE_BINARY));
            for (org.apache.flink.core.fs.Path jar : jobGraph.getUserJars()) {
                jarFileNames.add(jar.getName());
                filesToUpload.add(new FileUpload(Paths.get(jar.toUri()), RestConstants.CONTENT_TYPE_JAR));
            }

            for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) {
                final org.apache.flink.core.fs.Path artifactPath = new org.apache.flink.core.fs.Path(artifacts.getValue().filePath);

                try {
                    // Artifacts already on a distributed FS are fetched by the cluster itself
                    // and therefore not uploaded from here.
                    if (!artifactPath.getFileSystem().isDistributedFS()) {
                        artifactFileNames.add(new JobSubmitRequestBody.DistributedCacheFile(artifacts.getKey(), artifactPath.getName()));
                        filesToUpload.add(new FileUpload(Paths.get(artifacts.getValue().filePath), RestConstants.CONTENT_TYPE_BINARY));
                    }
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Failed to get the FileSystem of artifact " + artifactPath + ".", e));
                }
            }

            final JobSubmitRequestBody requestBody = new JobSubmitRequestBody(
                    jobGraphFile.getFileName().toString(),
                    jarFileNames,
                    artifactFileNames);
            return Tuple2.of(requestBody, Collections.unmodifiableCollection(filesToUpload));
        });

        // (3) Submit with retries on connection problems / SERVICE_UNAVAILABLE.
        CompletableFuture<JobSubmitResponseBody> submissionFuture = requestFuture.thenCompose(requestAndFileUpload -> sendRetriableRequest(
                JobSubmitHeaders.getInstance(),
                EmptyMessageParameters.getInstance(),
                requestAndFileUpload.f0,
                requestAndFileUpload.f1,
                isConnectionProblemOrServiceUnavailable()));

        // (4) Best-effort temp-file cleanup. `handle` converts an exceptional submission into a
        // normal completion so the delete also runs when submission FAILED — previously the
        // `thenCombine` stage completed exceptionally on failure and the temp file leaked.
        submissionFuture
                .handle((ignoredResponse, ignoredThrowable) -> (Void) null)
                .thenCombine(jobGraphFileFuture, (ignored, jobGraphFile) -> jobGraphFile)
                .thenAccept(jobGraphFile -> {
                    try {
                        Files.deleteIfExists(jobGraphFile);
                    } catch (IOException ignored) {
                        // Best-effort cleanup of a temp file; nothing sensible to do on failure.
                    }
                });

        return submissionFuture
                .thenApply(ignore -> jobGraph.getJobID())
                .exceptionally(
                        (Throwable throwable) -> {
                            throw new CompletionException(new JobSubmissionException(jobGraph.getJobID(), "Failed to submit JobGraph.", ExceptionUtils.stripCompletionException(throwable)));
                        });
    }

    /**
     * Sends a request to the current web monitor leader, retrying (per the client configuration)
     * while {@code retryPredicate} matches the failure.
     */
    private <M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P>
    sendRetriableRequest(M messageHeaders, U messageParameters, R request, Collection<FileUpload> filesToUpload, Predicate<Throwable> retryPredicate) {
        return retry(() -> getWebMonitorBaseUrl().thenCompose(webMonitorBaseUrl -> {
            try {
                return restClient.sendRequest(webMonitorBaseUrl.getHost(), webMonitorBaseUrl.getPort(), messageHeaders, messageParameters, request, filesToUpload);
            } catch (IOException e) {
                // sendRequest declares IOException; surface it through the future.
                throw new CompletionException(e);
            }
        }), retryPredicate);
    }

    /**
     * Resolves the current web monitor leader address into a {@link URL}, failing after the
     * configured await-leader timeout.
     */
    @VisibleForTesting
    CompletableFuture<URL> getWebMonitorBaseUrl() {
        return FutureUtils.orTimeout(
                webMonitorLeaderRetriever.getLeaderFuture(),
                restClusterClientConfiguration.getAwaitLeaderTimeout(),
                TimeUnit.MILLISECONDS)
                .thenApplyAsync(leaderAddressSessionId -> {
                    final String url = leaderAddressSessionId.f0;
                    try {
                        return new URL(url);
                    } catch (MalformedURLException e) {
                        throw new IllegalArgumentException("Could not parse URL from " + url, e);
                    }
                }, executorService);
    }

    /** Retry when the failure is a connection problem or the server answered 503. */
    private static Predicate<Throwable> isConnectionProblemOrServiceUnavailable() {
        return isConnectionProblemException().or(isServiceUnavailable());
    }

    /** Matches connect/timeout failures and, broadly, any {@link IOException} in the cause chain. */
    private static Predicate<Throwable> isConnectionProblemException() {
        return (throwable) ->
                ExceptionUtils.findThrowable(throwable, java.net.ConnectException.class).isPresent() ||
                        ExceptionUtils.findThrowable(throwable, java.net.SocketTimeoutException.class).isPresent() ||
                        ExceptionUtils.findThrowable(throwable, ConnectTimeoutException.class).isPresent() ||
                        ExceptionUtils.findThrowable(throwable, IOException.class).isPresent();
    }

    /** Matches an HTTP 503 (SERVICE_UNAVAILABLE) response from the REST endpoint. */
    private static Predicate<Throwable> isServiceUnavailable() {
        return httpExceptionCodePredicate(code -> code == HttpResponseStatus.SERVICE_UNAVAILABLE.code());
    }

    /** Matches a {@link RestClientException} whose HTTP status code satisfies the given predicate. */
    private static Predicate<Throwable> httpExceptionCodePredicate(Predicate<Integer> statusCodePredicate) {
        return (throwable) -> ExceptionUtils.findThrowable(throwable, RestClientException.class)
                .map(restClientException -> {
                    final int code = restClientException.getHttpResponseStatus().code();
                    return statusCodePredicate.test(code);
                })
                .orElse(false);
    }

    /** Runs {@code operation} with the configured max attempts / delay, retrying while the predicate matches. */
    private <C> CompletableFuture<C> retry(
            CheckedSupplier<CompletableFuture<C>> operation,
            Predicate<Throwable> retryPredicate) {
        return FutureUtils.retryWithDelay(
                CheckedSupplier.unchecked(operation),
                restClusterClientConfiguration.getRetryMaxAttempts(),
                Time.milliseconds(restClusterClientConfiguration.getRetryDelay()),
                retryPredicate,
                new ScheduledExecutorServiceAdapter(retryExecutorService));
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<JobStatus> getJobStatus(JobID jobId) {
        return null;
    }

    /**
     * Requests the {@link JobResult} for the given job, polling until the asynchronous
     * resource reaches the COMPLETED state.
     */
    @Override
    public CompletableFuture<JobResult> requestJobResult(JobID jobId) {

        return pollResourceAsync(() -> {
            JobMessageParameters jobMessageParameters = new JobMessageParameters();
            jobMessageParameters.jobPathParameter.resolve(jobId);
            return sendRequest(JobExecutionResultHeaders.getInstance(), jobMessageParameters, EmptyRequestBody.getInstance());
        });
    }

    /** Convenience overload of {@link #sendRetriableRequest} with no file uploads and the default retry predicate. */
    @VisibleForTesting
    public <M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P>
    sendRequest(M messageHeaders, U messageParameters, R request) {
        return sendRetriableRequest(messageHeaders, messageParameters, request, Collections.emptyList(), isConnectionProblemOrServiceUnavailable());
    }

    /** Starts polling an asynchronously created resource from attempt 0. */
    private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync(
            final Supplier<CompletableFuture<A>> resourceFutureSupplier) {

        return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0);
    }

    /**
     * Polls the supplied asynchronous resource until its queue status is COMPLETED, re-scheduling
     * itself with a growing delay between attempts.
     *
     * @param attempt zero-based attempt counter fed to the wait strategy; previously this was
     *     hard-coded to {@code 0}, so the exponential back-off never actually grew
     */
    private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync(
            final Supplier<CompletableFuture<A>> resourceFutureSupplier,
            final CompletableFuture<R> resultFuture,
            final long attempt) {

        resourceFutureSupplier.get().whenComplete((asynchronouslyCreatedResource, throwable) -> {
            if (throwable != null) {
                resultFuture.completeExceptionally(throwable);
            } else {
                if (asynchronouslyCreatedResource.queueStatus().getId() == QueueStatus.Id.COMPLETED) {
                    resultFuture.complete(asynchronouslyCreatedResource.resource());
                } else {
                    // Not ready yet: re-poll after the back-off for this attempt.
                    retryExecutorService.schedule(() -> {
                        pollResourceAsync(resourceFutureSupplier, resultFuture, attempt + 1);
                    }, waitStrategy.sleepTime(attempt), TimeUnit.MILLISECONDS);
                }
            }
        });

        return resultFuture;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID, ClassLoader loader) {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<Acknowledge> cancel(JobID jobId) {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<String> cancelWithSavepoint(JobID jobId, @Nullable String savepointDirectory) {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<String> stopWithSavepoint(JobID jobId, boolean advanceToEndOfEventTime, @Nullable String savepointDirectory) {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<String> triggerSavepoint(JobID jobId, @Nullable String savepointDirectory) {
        return null;
    }

    /** Not implemented in this demo; always returns {@code null}. */
    @Override
    public CompletableFuture<CoordinationResponse> sendCoordinationRequest(JobID jobId, OperatorID operatorId, CoordinationRequest request) {
        return null;
    }
}
