//
// Created by Administrator on 2024/10/29.
//


#include "AwsS3Client.h"
#include <memory>


// Builds an S3 client from the application-wide configuration singleton.
// Connection settings and credentials are cached in members so later
// requests (uploads, listings) can reuse them, e.g. the bucket name.
AwsS3Client::AwsS3Client() {
    Configuration &appConfig = Configuration::getInstance();

    // Pull connection parameters from the shared configuration.
    region = appConfig.getS3Region();
    endpoint = appConfig.getS3endpoint();
    bucketName = appConfig.getS3BucketName();
    ak = appConfig.getS3AK();
    sk = appConfig.getS3SK();

    // Translate the std::string settings into the SDK's Aws::String type.
    Aws::Client::ClientConfiguration clientCfg;
    clientCfg.scheme = Aws::Http::Scheme::HTTPS;
    clientCfg.verifySSL = true;
    clientCfg.region = Aws::String(region.c_str(), region.size());
    clientCfg.endpointOverride = Aws::String(endpoint.c_str(), endpoint.size());

    Aws::Auth::AWSCredentials creds(
            Aws::String(ak.c_str(), ak.size()),
            Aws::String(sk.c_str(), sk.size())
    );
    // nullptr lets the SDK build its default endpoint provider.
    s3_client_ = {creds, nullptr, clientCfg};
}


void AwsS3Client::ListS3Buckets() {
    auto response = s3_client_.ListBuckets();
    if (!response.IsSuccess()) {
        throw std::runtime_error("Error while listing buckets:" + response.GetError().GetMessage());
    }

    auto buckets = response.GetResult().GetBuckets();
    tabulate::Table table;
    table.add_row({"Bucket Name", "Creation Date"});
    for (const auto &bucket: buckets) {
        table.add_row({bucket.GetName(), bucket.GetCreationDate().ToLocalTimeString(Aws::Utils::DateFormat::ISO_8601)});
    }
    std::cout << table << std::endl;

}

// Uploads the file at filePath to the configured bucket under objectKey.
//
// The file is streamed to the SDK in binary mode rather than read into
// memory, so large files can be uploaded without buffering them whole.
//
// @param objectKey  destination key inside the bucket
// @param filePath   local path of the file to upload
// @return filePath on success (callers use it to track which file went up)
// @throws std::runtime_error when the file cannot be opened or the PUT fails
std::string AwsS3Client::UploadToS3(const Aws::String &objectKey, const Aws::String &filePath) {
    Aws::S3::Model::PutObjectRequest request;
    request.SetBucket(bucketName);
    request.SetKey(objectKey);

    // The SDK takes shared ownership of the stream and reads it during PutObject.
    std::shared_ptr<Aws::IOStream> fileStream = Aws::MakeShared<Aws::FStream>(
            "SampleAllocationTag",
            filePath.c_str(),
            std::ios_base::in | std::ios_base::binary
    );

    // FStream reports an open failure through its stream state.
    if (!*fileStream) {
        throw std::runtime_error("Error opening file:" + filePath);
    }

    request.SetBody(fileStream);
    auto putObjectOutcome = s3_client_.PutObject(request);
    if (!putObjectOutcome.IsSuccess()) {
        // Message fixed: was "Upload failed::" (stray duplicate colon, no space).
        throw std::runtime_error("Upload failed: " + putObjectOutcome.GetError().GetMessage());
    }

    return filePath;
}

// Serializes jsonData and uploads it to the configured bucket under objectKey.
//
// @param objectKey  destination key inside the bucket
// @param jsonData   JSON document; serialized with nlohmann::json::dump()
// @return objectKey on success
// @throws std::runtime_error when the PUT fails
std::string AwsS3Client::UploadJsonToS3(const std::string &objectKey, const nlohmann::json &jsonData) {
    const std::string jsonString = jsonData.dump();

    Aws::S3::Model::PutObjectRequest request;
    request.SetBucket(bucketName);
    request.SetKey(objectKey);
    // Mark the object as JSON so downstream consumers get a sensible MIME type.
    request.SetContentType("application/json");

    const std::shared_ptr<Aws::IOStream> inputData = Aws::MakeShared<Aws::StringStream>("");
    // Write the std::string by (data, size): streaming c_str() would cost an
    // extra strlen and truncate at any embedded NUL byte.
    inputData->write(jsonString.data(), static_cast<std::streamsize>(jsonString.size()));

    request.SetBody(inputData);
    auto putObjectOutcome = s3_client_.PutObject(request);
    if (!putObjectOutcome.IsSuccess()) {
        throw std::runtime_error("Failed to upload JSON to S3: " + putObjectOutcome.GetError().GetMessage());
    }

    return objectKey;
}


// Coroutine that uploads each file descriptor in `files` to S3, publishing
// the uploaded file name through the promise after every upload and then
// suspending until the caller resumes it (see Executor).
//
// Each entry in `files` must carry "pipeline" and "fileName" keys —
// .at() throws std::out_of_range when one is missing.
Promise<std::string> AwsS3Client::Gather(std::vector<std::map<std::string, std::string>> &files) {
    // Obtain a pointer to this coroutine's own promise so results can be published.
    auto pp = co_await Awaiter<Promise<std::string>::promise_type>{};
    // One client serves every upload; the original rebuilt the client per
    // file, re-reading the configuration singleton each iteration.
    AwsS3Client client;
    for (const auto &file: files) {
        std::string objKey = DcmToolKit::getObjKeyByPretzelProtocol(file);
        objKey = std::format("{}/{}", objKey, file.at("pipeline"));
        pp->value_ = client.UploadToS3(objKey, file.at("fileName"));
        co_await std::suspend_always{};
    }
}

// Drives the Gather coroutine, collecting one uploaded object path per
// input entry.
//
// Relies on Gather running eagerly up to its first suspension point, so
// promise.value_ already holds the first result before the first resume.
// NOTE(review): the coroutine handle is never destroyed here — assumes
// Promise's final_suspend releases the frame; confirm against Promise.
std::vector<std::string> AwsS3Client::Executor(std::vector<std::map<std::string, std::string>> &files) {
    std::vector<std::string> tmp_files;
    tmp_files.reserve(files.size());
    std::coroutine_handle<Promise<std::string>::promise_type> handler = Gather(files);
    Promise<std::string>::promise_type &promise = handler.promise();
    // The loop variable was only a counter; iterate by count instead of
    // copying an entire std::map per iteration.
    for (std::size_t i = 0; i < files.size(); ++i) {
        tmp_files.emplace_back(promise.value_);
        handler();  // resume Gather so it produces the next value
    }
    return tmp_files;
}
