package test.s3;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import com.amazonaws.services.s3.transfer.model.UploadResult;
import com.jfinal.kit.JsonKit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;

public class HighLevelMultipartUpload {
    private static final Logger logger = LoggerFactory.getLogger(HighLevelMultipartUpload.class);

    /**
     * Demonstrates a high-level multipart upload to S3 using {@link TransferManager}.
     *
     * <p>Reads a local file as a stream, attaches custom user metadata, registers a
     * progress listener, waits for the upload to complete, then shuts the
     * TransferManager down. The multipart threshold is set artificially low (10 bytes)
     * so even a tiny file exercises the multipart path.
     *
     * @param args unused
     * @throws Exception propagated from upload interruption/failure not handled below
     */
    public static void main(String[] args) throws Exception {
        String filePath = "f:/temp/1m.7z";
        String fileKey = "1/1m.7z";
        ObjectMetadata metadata = new ObjectMetadata();
        TransferManager tm = null;
        try {
            AmazonS3 s3Client = AppTest.getAmazonS3();
            // 10-byte threshold forces a multipart upload even for small demo files.
            tm = TransferManagerBuilder.standard()
                    .withS3Client(s3Client)
                    .withMultipartUploadThreshold(10L)
                    .build();
            File file = new File(filePath);

            // Custom user metadata; content length is required when uploading from a
            // stream, otherwise the SDK buffers the whole stream to size it.
            metadata.setContentLength(file.length());
            metadata.addUserMetadata("name", "haha");  // e.g. an application attribute such as a company code
            metadata.addUserMetadata("user", "1");

            // try-with-resources guarantees the stream is closed on every path
            // (the original leaked it — it was never closed at all).
            try (InputStream in = new FileInputStream(file)) {
                PutObjectRequest request = new PutObjectRequest(AppTest.bucket_name, fileKey, in, metadata);

                // Progress listener: the 10s sleep deliberately throttles callbacks so
                // individual progress events can be observed while the demo runs.
                request.setGeneralProgressListener(progressEvent -> {
                    try {
                        Thread.sleep(10000);
                    } catch (InterruptedException e) {
                        // Restore interrupt status instead of swallowing it.
                        Thread.currentThread().interrupt();
                    }
                    logger.info("hashcode:{},event:{},bytes:{},transferred:{}",
                            progressEvent.hashCode(),
                            progressEvent.toString(),
                            progressEvent.getBytes(),
                            progressEvent.getBytesTransferred());
                });

                // TransferManager processes transfers asynchronously; upload() returns
                // immediately with a handle.
                logger.info("Object upload started");
                Upload upload = tm.upload(request);

                // Block until the transfer finishes and obtain the result (ETag etc.).
                UploadResult result = upload.waitForUploadResult();
                logger.info("Object upload complete,result:{}", JsonKit.toJson(result));
            }
        } catch (AmazonServiceException e) {
            // The call was transmitted successfully, but Amazon S3 couldn't process
            // it, so it returned an error response.
            logger.error("S3 returned an error response for the upload", e);
        } catch (SdkClientException e) {
            // Amazon S3 couldn't be contacted for a response, or the client
            // couldn't parse the response from Amazon S3.
            logger.error("Could not contact S3 or parse its response", e);
        } finally {
            // Always release the TransferManager's thread pool — the original only
            // shut it down on the success path, leaking threads on failure.
            if (tm != null) {
                tm.shutdownNow();
                System.out.println("shutdownNow !!");
            }
        }
    }
}