package org.elasticsearch;

import com.obs.services.ObsClient;
import com.obs.services.exception.ObsException;
import com.obs.services.model.*;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.*;
import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * OBS (Huawei Object Storage Service) backed {@link org.elasticsearch.common.blobstore.BlobContainer}.
 * Created by yangyin on 2023/06/08.
 */
public class ObsBlobContainer extends AbstractBlobContainer {

    private static final Logger logger = LogManager.getLogger(ObsBlobContainer.class);

    /**
     * Maximum number of keys in a single {@link DeleteObjectsRequest}; OBS (like S3)
     * limits bulk delete requests to 1000 keys.
     */
    private static final int MAX_BULK_DELETES = 1000;

    private final ObsBlobStore blobStore;

    /** Key prefix of this container, as produced by {@link BlobPath#buildAsString()} (ends with the separator). */
    private final String keyPath;

    ObsBlobContainer(BlobPath path, ObsBlobStore blobStore) {
        super(path);
        this.blobStore = blobStore;
        this.keyPath = path.buildAsString();
    }

    /**
     * Checks whether a blob with the given name exists in this container.
     *
     * @throws BlobStoreException if the existence check fails for any reason
     */
    public boolean blobExists(String blobName) {
        try (ObsReference clientReference = blobStore.clientReference()) {
            return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName)));
        } catch (final Exception e) {
            throw new BlobStoreException("Failed to check if blob [" + blobName + "] exists", e);
        }
    }

    @Override
    public InputStream readBlob(String blobName) throws IOException {
        return new ObsRetryingInputStream(blobStore, buildKey(blobName));
    }

    /**
     * Reads {@code length} bytes of the blob starting at {@code position} using a ranged GET.
     *
     * @throws IllegalArgumentException if {@code position} or {@code length} is negative
     */
    public InputStream readBlob(String blobName, long position, long length) throws IOException {
        if (position < 0L) {
            throw new IllegalArgumentException("position must be non-negative");
        }
        if (length < 0) {
            throw new IllegalArgumentException("length must be non-negative");
        }
        if (length == 0) {
            return new ByteArrayInputStream(new byte[0]);
        } else {
            // The range end is inclusive, hence position + length - 1; addExact guards against long overflow.
            return new ObsRetryingInputStream(blobStore, buildKey(blobName), position, Math.addExact(position, length - 1));
        }
    }

    public long readBlobPreferredLength() {
        // This container returns streams that must be fully consumed, so we tell consumers to make bounded requests.
        return new ByteSizeValue(32, ByteSizeUnit.MB).getBytes();
    }

    /**
     * This implementation ignores the failIfAlreadyExists flag as the OBS API has no way to enforce this due to its weak consistency model.
     */
    @Override
    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
        assert inputStream.markSupported() : "No mark support on inputStream breaks the obs SDK's ability to retry requests";
        SocketAccess.doPrivilegedIOException(() -> {
            if (blobSize <= getLargeBlobThresholdInBytes()) {
                executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize);
            } else {
                executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize);
            }
            return null;
        });
    }

    // package private for testing
    long getLargeBlobThresholdInBytes() {
        return blobStore.bufferSizeInBytes();
    }

    @Override
    public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
        // OBS offers no atomic rename/put; a plain write is the best we can do (same approach as the S3 plugin).
        writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
    }

    /**
     * Deletes the named blob. Previously this method was an empty no-op, silently leaking blobs.
     * OBS deletes are idempotent, so deleting a missing blob does not fail.
     */
    @Override
    public void deleteBlob(String blobName) throws IOException {
        try (ObsReference clientReference = blobStore.clientReference()) {
            SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObject(blobStore.bucket(), buildKey(blobName)));
        } catch (final Exception e) {
            throw new IOException("Exception when deleting blob [" + blobName + "]", e);
        }
    }

    /**
     * Deletes every blob under this container's key prefix.
     * <p>
     * Pages through the listing with the continuation marker: the previous implementation only
     * consumed the first page, so containers larger than one listing page were never fully deleted.
     *
     * @return the number of blobs and bytes deleted
     */
    @Override
    public DeleteResult delete() throws IOException {
        final AtomicLong deletedBlobs = new AtomicLong();
        final AtomicLong deletedBytes = new AtomicLong();
        try (ObsReference clientReference = blobStore.clientReference()) {
            final ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
            listObjectsRequest.setBucketName(blobStore.bucket());
            listObjectsRequest.setPrefix(keyPath);
            ObjectListing list;
            do {
                list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
                final List<String> blobsToDelete = new ArrayList<>();
                list.getObjectSummaries().forEach(obsObjectSummary -> {
                    deletedBlobs.incrementAndGet();
                    deletedBytes.addAndGet(obsObjectSummary.getMetadata().getContentLength());
                    blobsToDelete.add(obsObjectSummary.getObjectKey());
                });
                // Keys from the listing are already absolute, hence relative = false.
                doDeleteBlobs(blobsToDelete, false);
                // Resume the listing where the (possibly truncated) page ended.
                listObjectsRequest.setMarker(list.getNextMarker());
            } while (list.isTruncated());
        } catch (final ObsException e) {
            throw new IOException("Exception when deleting blob container [" + keyPath + "]", e);
        }
        return new DeleteResult(deletedBlobs.get(), deletedBytes.get());
    }

    @Override
    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
        doDeleteBlobs(blobNames, true);
    }

    /**
     * Deletes the given blobs in bulk requests of at most {@link #MAX_BULK_DELETES} keys each.
     *
     * @param blobNames the blobs to delete
     * @param relative  whether the names are relative to this container (and must be prefixed with
     *                  {@link #keyPath}) or already absolute bucket keys
     * @throws IOException wrapping the first bulk-delete failure; remaining requests are still attempted
     */
    private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOException {
        if (blobNames.isEmpty()) {
            return;
        }
        final Set<String> outstanding;
        if (relative) {
            outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet());
        } else {
            outstanding = new HashSet<>(blobNames);
        }
        try (ObsReference clientReference = blobStore.clientReference()) {
            // Build one DeleteObjectsRequest per partition of MAX_BULK_DELETES keys.
            // NOTE: the previous implementation discarded the partition and deleted whatever
            // listVersions() returned, i.e. potentially unrelated objects.
            final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
            List<String> partition = new ArrayList<>();
            for (String key : outstanding) {
                partition.add(key);
                if (partition.size() == MAX_BULK_DELETES) {
                    deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
                    partition = new ArrayList<>();
                }
            }
            if (partition.isEmpty() == false) {
                deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
            }
            SocketAccess.doPrivilegedVoid(() -> {
                ObsException aex = null;
                final ObsClient obsClient = clientReference.client();
                for (DeleteObjectsRequest deleteRequest : deleteRequests) {
                    final List<String> keysInRequest = deleteRequest.getKeyAndVersionsList().stream()
                            .map(KeyAndVersion::getKey)
                            .collect(Collectors.toList());
                    try {
                        obsClient.deleteObjects(deleteRequest);
                        outstanding.removeAll(keysInRequest);
                    } catch (ObsException e) {
                        // Keep the failed keys in 'outstanding' so the wrapping IOException reports them,
                        // but keep going so the remaining requests are still attempted.
                        aex = ExceptionsHelper.useOrSuppress(aex, e);
                    }
                }
                if (aex != null) {
                    throw aex;
                }
            });
        } catch (Exception e) {
            throw new IOException("Failed to delete blobs [" + outstanding + "]", e);
        }
        assert outstanding.isEmpty();
    }

    /**
     * Builds a bulk-delete request for exactly the given keys.
     */
    private static DeleteObjectsRequest bulkDelete(String bucket, List<String> keys) {
        final DeleteObjectsRequest deleteObjectsRequest = new DeleteObjectsRequest(bucket);
        for (String key : keys) {
            deleteObjectsRequest.addKeyAndVersion(key);
        }
        return deleteObjectsRequest;
    }

    @Override
    public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException {
        try (ObsReference clientReference = blobStore.clientReference()) {
            return executeListing(clientReference, listObjectsRequest(blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix)))
                    .stream()
                    .flatMap(listing -> listing.getObjectSummaries().stream())
                    // Strip the container prefix so callers see container-relative names.
                    .map(summary -> new PlainBlobMetaData(
                            summary.getObjectKey().substring(keyPath.length()),
                            summary.getMetadata().getContentLength()))
                    .collect(Collectors.toMap(PlainBlobMetaData::name, Function.identity()));
        } catch (final ObsException e) {
            throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e);
        }
    }

    @Override
    public Map<String, BlobMetaData> listBlobs() throws IOException {
        return listBlobsByPrefix(null);
    }

    /**
     * Lists the immediate child containers, derived from the common prefixes of a
     * delimiter-based listing of this container's key path.
     */
    @Override
    public Map<String, BlobContainer> children() throws IOException {
        try (ObsReference clientReference = blobStore.clientReference()) {
            return executeListing(clientReference, listObjectsRequest(keyPath)).stream().flatMap(listing -> {
                        assert listing.getObjectSummaries().stream().noneMatch(s -> {
                            for (String commonPrefix : listing.getCommonPrefixes()) {
                                if (s.getObjectKey().substring(keyPath.length()).startsWith(commonPrefix)) {
                                    return true;
                                }
                            }
                            return false;
                        }) : "Response contained children for listed common prefixes.";
                        return listing.getCommonPrefixes().stream();
                    })
                    .map(prefix -> prefix.substring(keyPath.length()))
                    .filter(name -> name.isEmpty() == false)
                    // Drop the trailing delimiter from each common prefix to get the child name.
                    .map(name -> name.substring(0, name.length() - 1))
                    .collect(Collectors.toMap(Function.identity(), name -> blobStore.blobContainer(path().add(name))));
        } catch (final ObsException e) {
            throw new IOException("Exception when listing children of [" + path().buildAsString() + ']', e);
        }
    }

    /**
     * Executes the listing request, following continuation markers until the listing is
     * complete. The previous implementation returned only the first page, silently
     * truncating large listings.
     */
    private static List<ObjectListing> executeListing(ObsReference clientReference, ListObjectsRequest listObjectsRequest) {
        final List<ObjectListing> results = new ArrayList<>();
        ObjectListing list;
        do {
            list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
            results.add(list);
            listObjectsRequest.setMarker(list.getNextMarker());
        } while (list.isTruncated());
        return results;
    }

    private ListObjectsRequest listObjectsRequest(String keyPath) {
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
        listObjectsRequest.setBucketName(blobStore.bucket());
        listObjectsRequest.setPrefix(keyPath);
        listObjectsRequest.setDelimiter("/");
        return listObjectsRequest;
    }

    /** Prefixes a container-relative blob name with this container's key path. */
    private String buildKey(String blobName) {
        return keyPath + blobName;
    }

    /**
     * Uploads a blob using a single upload request.
     */
    void executeSingleUpload(final ObsBlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {

        // Extra safety checks
        if (blobSize > ObsRepository.MAX_FILE_SIZE.getBytes()) {
            throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + ObsRepository.MAX_FILE_SIZE);
        }
        if (blobSize > blobStore.bufferSizeInBytes()) {
            throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
        }

        // Use a single metadata object carrying both the content length and the storage class.
        // The previous implementation built this metadata and then discarded it, so the
        // content length was never attached to the request.
        final ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(blobSize);
        md.setStorageClass(blobStore.getStorageClass().getCode());
        final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input);
        putRequest.setMetadata(md);
        putRequest.setAcl(blobStore.getAcl());

        try (ObsReference clientReference = blobStore.clientReference()) {
            SocketAccess.doPrivilegedVoid(() -> {
                clientReference.client().putObject(putRequest);
            });
        } catch (final ObsException e) {
            throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
        }
    }

    /**
     * Uploads a blob using multipart upload requests.
     */
    void executeMultipartUpload(final ObsBlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
        ensureMultiPartUploadSize(blobSize);
        final long partSize = blobStore.bufferSizeInBytes();
        final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);

        if (multiparts.v1() > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
        }

        final int nbParts = multiparts.v1().intValue();
        final long lastPartSize = multiparts.v2();
        assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";

        final SetOnce<String> uploadId = new SetOnce<>();
        final String bucketName = blobStore.bucket();
        boolean success = false;

        // Set the metadata exactly once: the previous implementation assigned it three times
        // and the last (blank) assignment overwrote the storage class.
        final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
        final ObjectMetadata metadata = new ObjectMetadata();
        metadata.setStorageClass(blobStore.getStorageClass().getCode());
        initRequest.setMetadata(metadata);
        initRequest.setAcl(blobStore.getAcl());
        try (ObsReference clientReference = blobStore.clientReference()) {

            uploadId.set(SocketAccess.doPrivileged(() -> clientReference.client().initiateMultipartUpload(initRequest).getUploadId()));
            if (Strings.isEmpty(uploadId.get())) {
                throw new IOException("Failed to initialize multipart upload " + blobName);
            }

            final List<PartEtag> parts = new ArrayList<>();

            long bytesCount = 0;
            for (int i = 1; i <= nbParts; i++) {
                final UploadPartRequest uploadRequest = new UploadPartRequest();
                uploadRequest.setBucketName(bucketName);
                uploadRequest.setObjectKey(blobName);
                uploadRequest.setUploadId(uploadId.get());
                uploadRequest.setPartNumber(i);
                uploadRequest.setInput(input);
                // All parts are partSize bytes except the last, which carries the remainder.
                if (i < nbParts) {
                    uploadRequest.setPartSize(partSize);
                } else {
                    uploadRequest.setPartSize(lastPartSize);
                }
                bytesCount += uploadRequest.getPartSize();

                final UploadPartResult uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest));
                parts.add(new PartEtag(uploadResponse.getEtag(), uploadResponse.getPartNumber()));
            }

            if (bytesCount != blobSize) {
                throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + "bytes sent but got " + bytesCount);
            }

            final CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
            SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(completeRequest));
            success = true;

        } catch (final ObsException e) {
            throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
        } finally {
            // On any failure after initiation, abort the upload so OBS does not keep orphaned parts.
            if ((success == false) && Strings.hasLength(uploadId.get())) {
                final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
                try (ObsReference clientReference = blobStore.clientReference()) {
                    SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest));
                }
            }
        }
    }

    // non-static, package private for testing
    void ensureMultiPartUploadSize(final long blobSize) {
        if (blobSize > ObsRepository.MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
            throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + ObsRepository.MAX_FILE_SIZE_USING_MULTIPART);
        }
        if (blobSize < ObsRepository.MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
            throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + ObsRepository.MIN_PART_SIZE_USING_MULTIPART);
        }
    }

    /**
     * Returns the number parts of size of {@code partSize} needed to reach {@code totalSize},
     * along with the size of the last (or unique) part.
     *
     * @param totalSize the total size
     * @param partSize  the part size
     * @return a {@link Tuple} containing the number of parts to fill {@code totalSize} and
     * the size of the last part
     */
    static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
        if (partSize <= 0) {
            throw new IllegalArgumentException("Part size must be greater than zero");
        }

        if ((totalSize == 0L) || (totalSize <= partSize)) {
            return Tuple.tuple(1L, totalSize);
        }

        final long parts = totalSize / partSize;
        final long remaining = totalSize % partSize;

        if (remaining == 0) {
            return Tuple.tuple(parts, partSize);
        } else {
            return Tuple.tuple(parts + 1, remaining);
        }
    }
}
