package net.dybai.glacier.aws;

import com.amazonaws.services.glacier.AmazonGlacier;
import com.amazonaws.services.glacier.TreeHashGenerator;
import com.amazonaws.services.glacier.model.*;
import com.amazonaws.util.BinaryUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class Upload {
    private static final Logger LOG = LoggerFactory.getLogger(Upload.class);

    /** Maximum attempts per part before the last exception is propagated to the caller. */
    private static final int MAX_RETRY = 5;

    /**
     * Total number of bytes streamed by the most recent {@link #uploadParts} call.
     * Consumed by {@link #completeMultipartUpload} as the archive size, so the same
     * Upload instance must be used for the whole upload sequence (not thread-safe).
     */
    private long fileSize = 0;

    /**
     * Starts a Glacier multipart upload and returns its upload id.
     *
     * @param glacier            Glacier client
     * @param accountId          AWS account id (or "-" for the credential owner)
     * @param vaultName          target vault
     * @param partSize           part size in bytes, as a decimal string (Glacier API contract)
     * @param archiveDescription free-form description stored with the archive
     * @return the upload id to pass to {@link #uploadParts} / {@link #completeMultipartUpload}
     */
    public String initiateMultipartUpload(AmazonGlacier glacier, String accountId, String vaultName, String partSize
            , String archiveDescription) {
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest()
                .withAccountId(accountId)
                .withVaultName(vaultName)
                .withArchiveDescription(archiveDescription)
                .withPartSize(partSize);

        InitiateMultipartUploadResult result = glacier.initiateMultipartUpload(request);

        return result.getUploadId();
    }

    /**
     * Uploads the stream in fixed-size parts and returns the whole-archive tree hash
     * (needed by {@link #completeMultipartUpload}). Also records the total byte count
     * in {@link #fileSize}.
     *
     * @param glacier   Glacier client
     * @param accountId AWS account id
     * @param vaultName target vault
     * @param partSize  part size in bytes; must match the value given at initiation
     * @param uploadId  id returned by {@link #initiateMultipartUpload}
     * @param in        source data; read to EOF, not closed by this method
     * @return the hex tree hash of the complete archive
     * @throws IOException if reading the stream fails
     */
    public String uploadParts(AmazonGlacier glacier, String accountId,  String vaultName, int partSize
            , String uploadId, InputStream in) throws IOException {
        int readLen;
        long pos = 0;
        byte[] buf = new byte[partSize];
        List<byte[]> binaryChecksumList = new ArrayList<>();

        while ((readLen = in.read(buf)) >= 0) {

            // Glacier requires every part except the last to be exactly partSize bytes,
            // otherwise later part uploads fail. A pipe may not buffer a full part's worth
            // of data per read() call, so keep reading until the buffer is full or EOF.
            if (readLen < partSize) {
                int reReadLen;
                int reReadPos = readLen;
                while ((reReadLen = in.read(buf, reReadPos, partSize - reReadPos)) > 0) {
                    reReadPos += reReadLen;
                }
                readLen = reReadPos;
            }

            // Nothing left to send (EOF with zero bytes gathered). Uploading an empty part
            // would produce the invalid range "bytes pos-(pos-1)", so stop here instead.
            if (readLen == 0) {
                break;
            }

            // A defensive copy is required, and each consumer below must get its own
            // ByteArrayInputStream: sharing one stream leaves it consumed and corrupts
            // the upload.
            byte[] bytesRead = Arrays.copyOf(buf, readLen);
            String checksum = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(bytesRead));
            byte[] binaryChecksum = BinaryUtils.fromHex(checksum);
            binaryChecksumList.add(binaryChecksum);

            String range = String.format("bytes %s-%s/*", pos, pos + readLen - 1);
            LOG.info("Uploading part {}", range);

            UploadMultipartPartRequest uploadRequest = new UploadMultipartPartRequest()
                    .withAccountId(accountId)
                    .withVaultName(vaultName)
                    .withUploadId(uploadId)
                    .withChecksum(checksum)
                    .withRange(range);
            UploadMultipartPartResult result = null;
            for (int times = 0; times < MAX_RETRY; times++) {
                try {
                    // Give every attempt a fresh body stream: a failed attempt may have
                    // partially consumed the previous one, and retrying with a consumed
                    // stream would upload truncated data.
                    uploadRequest.setBody(new ByteArrayInputStream(bytesRead));
                    result = glacier.uploadMultipartPart(uploadRequest);
                    break;
                } catch (Exception e) {
                    LOG.warn("Upload occur error, Retry times: " + (times + 1), e);
                    if (times == MAX_RETRY - 1) {
                        throw e;
                    }
                }
            }
            LOG.info("Part uploaded, checksum: {}", result.getChecksum());

            pos += readLen;
        }
        this.fileSize = pos;
        return TreeHashGenerator.calculateTreeHash(binaryChecksumList);
    }

    /**
     * Convenience overload: uploads a local file in parts. The stream is closed here.
     *
     * @return the hex tree hash of the complete archive
     * @throws IOException if the file cannot be opened or read
     */
    public String uploadParts(AmazonGlacier glacier, String accountId,  String vaultName, int partSize
            , String uploadId, File archiveFile) throws IOException {
        try (InputStream in = new FileInputStream(archiveFile)) {
            return uploadParts(glacier, accountId, vaultName, partSize, uploadId, in);
        }
    }

    /**
     * Finalizes the multipart upload. Must be called on the same instance that ran
     * {@link #uploadParts}, because the archive size is taken from {@link #fileSize}.
     *
     * @param checksum whole-archive tree hash returned by {@link #uploadParts}
     * @return the archive location URI reported by Glacier
     */
    public String completeMultipartUpload(AmazonGlacier glacier, String accountId,  String vaultName, String uploadId
            , String checksum) {
        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest()
                .withAccountId(accountId)
                .withVaultName(vaultName)
                .withUploadId(uploadId)
                .withChecksum(checksum)
                .withArchiveSize(String.valueOf(fileSize));

        CompleteMultipartUploadResult result = glacier.completeMultipartUpload(completeRequest);
        return result.getLocation();
    }

    /**
     * Aborts an in-progress multipart upload so Glacier discards its uploaded parts.
     *
     * @return a string rendering of the SDK response metadata (request id etc.)
     */
    public String abortMultipartUpload(AmazonGlacier glacier, String accountId,  String vaultName, String uploadId) {
        AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest()
                .withAccountId(accountId)
                .withVaultName(vaultName)
                .withUploadId(uploadId);
        AbortMultipartUploadResult result =  glacier.abortMultipartUpload(abortRequest);
        return result.getSdkResponseMetadata().toString();
    }
}
