/*
 * Copyright 2021 Vectorized, Inc.
 *
 * Licensed as a Redpanda Enterprise file under the Redpanda Community
 * License (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * https://github.com/vectorizedio/redpanda/blob/master/licenses/rcl.md
 */

#include "archival/ntp_archiver_service.h"

#include "archival/logger.h"
#include "cloud_storage/remote.h"
#include "cloud_storage/types.h"
#include "model/metadata.h"
#include "s3/client.h"
#include "s3/error.h"
#include "storage/disk_log_impl.h"
#include "storage/fs_utils.h"
#include "utils/gate_guard.h"

#include <seastar/core/coroutine.hh>
#include <seastar/core/file.hh>
#include <seastar/core/loop.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/core/semaphore.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/timed_out_error.hh>
#include <seastar/core/when_all.hh>
#include <seastar/util/noncopyable_function.hh>

#include <fmt/format.h>

#include <exception>
#include <stdexcept>

namespace archival {

// Render the archiver configuration as a single braced line; used by the
// logging macros when a 'configuration' value is formatted.
std::ostream& operator<<(std::ostream& o, const configuration& cfg) {
    auto rendered = fmt::format(
      "{{bucket_name: {}, interval: {}, client_config: {}, connection_limit: "
      "{}}}",
      cfg.bucket_name,
      cfg.interval.count(),
      cfg.client_config,
      cfg.connection_limit);
    o << rendered;
    return o;
}

// Create an archiver for a single NTP (namespace/topic/partition).
// Captures the partition's ntp and revision id, wires up the metrics
// probes and upload policy, and constructs an empty in-memory manifest
// (populated later by download_manifest() or by uploads).
ntp_archiver::ntp_archiver(
  const storage::ntp_config& ntp,
  const configuration& conf,
  cloud_storage::remote& remote,
  service_probe& svc_probe)
  : _svc_probe(svc_probe)
  , _probe(conf.ntp_metrics_disabled, ntp.ntp())
  , _ntp(ntp.ntp())
  , _rev(ntp.get_revision())
  , _remote(remote)
  // policy gets the per-ntp probe by reference so it can report metrics
  , _policy(_ntp, _svc_probe, std::ref(_probe))
  , _bucket(conf.bucket_name)
  , _manifest(_ntp, _rev)
  , _gate() {
    vlog(archival_log.trace, "Create ntp_archiver {}", _ntp.path());
}

// Stop the archiver: signal the abort source (presumably observed by
// in-flight remote operations — their subscription is not visible in this
// file) and close the gate, so the returned future resolves only after
// every gate-guarded operation (uploads/downloads) has completed.
ss::future<> ntp_archiver::stop() {
    _as.request_abort();
    return _gate.close();
}

// NTP (namespace/topic/partition) this archiver is responsible for.
const model::ntp& ntp_archiver::get_ntp() const { return _ntp; }

// Partition revision id captured at construction time.
model::revision_id ntp_archiver::get_revision_id() const { return _rev; }

// Timestamp set after a manifest re-upload in upload_next_candidates().
// NOTE(review): the top-level 'const' on this by-value return is
// meaningless (commonly flagged by linters); removing it would also
// require changing the declaration in the header.
const ss::lowres_clock::time_point ntp_archiver::get_last_upload_time() const {
    return _last_upload_time;
}

// Read-only view of the in-memory copy of the remote manifest.
const cloud_storage::manifest& ntp_archiver::get_remote_manifest() const {
    return _manifest;
}

// Fetch the manifest from the bucket into '_manifest'.
ss::future<cloud_storage::download_result>
ntp_archiver::download_manifest(retry_chain_node& parent) {
    // Hold the gate so stop() waits for this download to finish.
    gate_guard guard{_gate};
    // Child retry node scoped to this operation; arguments appear to be
    // (timeout, initial backoff, parent) — per retry_chain_node usage here.
    retry_chain_node local_rtc(manifest_upload_timeout, 100ms, &parent);
    vlog(
      archival_log.debug, "{} Downloading manifest for {}", local_rtc(), _ntp);
    auto result = co_await _remote.download_manifest(
      _bucket, _manifest, local_rtc);
    co_return result;
}

// Push the current in-memory '_manifest' to the bucket.
ss::future<cloud_storage::upload_result>
ntp_archiver::upload_manifest(retry_chain_node& parent) {
    // Hold the gate so stop() waits for this upload to finish.
    gate_guard guard{_gate};
    // Child retry node scoped to this operation; arguments appear to be
    // (timeout, initial backoff, parent) — per retry_chain_node usage here.
    retry_chain_node local_rtc(manifest_upload_timeout, 100ms, &parent);
    vlog(
      archival_log.debug, "{} Uploading manifest for {}", local_rtc(), _ntp);
    auto result = co_await _remote.upload_manifest(
      _bucket, _manifest, local_rtc);
    co_return result;
}

// Upload one segment described by 'candidate' to the bucket under its
// exposed name. The remote layer drives retries via 'fib' and calls
// 'reset_func' to obtain a fresh input stream for each attempt.
ss::future<cloud_storage::upload_result> ntp_archiver::upload_segment(
  upload_candidate candidate, retry_chain_node& parent) {
    gate_guard guard{_gate};
    retry_chain_node fib(segment_upload_timeout, 100ms, &parent);
    vlog(
      archival_log.debug,
      "{} Uploading segment for {}, exposed name {} offset {}, length {}",
      fib(),
      _ntp,
      candidate.exposed_name,
      candidate.starting_offset,
      candidate.content_length);

    // Capture 'candidate' by value so the factory can be re-invoked on
    // retry; presumably the copied candidate (and its 'source' handle)
    // keeps the segment alive for the lambda's lifetime — TODO confirm
    // ownership semantics of upload_candidate::source.
    auto reset_func = [candidate] {
        auto stream = candidate.source->reader().data_stream(
          candidate.file_offset, ss::default_priority_class());
        return stream;
    };
    co_return co_await _remote.upload_segment(
      _bucket,
      candidate.exposed_name,
      candidate.content_length,
      reset_func,
      _manifest,
      fib);
}

// Upload the next batch of segments (up to '_concurrency'), starting right
// after the last offset recorded in the manifest. Metadata for the
// contiguous prefix of successful uploads is added to the manifest, which
// is then re-uploaded.
//
// \param lm log manager used by '_policy' to locate upload candidates
// \param parent retry chain node scoping timeouts/backoff for the batch
// \return counts of succeeded and failed segment uploads
ss::future<ntp_archiver::batch_result> ntp_archiver::upload_next_candidates(
  storage::log_manager& lm, retry_chain_node& parent) {
    gate_guard guard{_gate};
    // Only one batch at a time may mutate '_manifest'.
    auto mlock = co_await ss::get_units(_mutex, 1);
    vlog(
      archival_log.debug,
      "{} Uploading next candidates called for {}",
      parent(),
      _ntp);
    ntp_archiver::batch_result total{};
    // We have to increment last offset to guarantee progress.
    // The manifest's last offset contains committed_offset of the
    // latest uploaded segment but '_policy' requires an offset that
    // belongs to the next segment or the gap. No need to do this
    // if there are no segments.
    auto offset = _manifest.size()
                    ? _manifest.get_last_offset() + model::offset(1)
                    : model::offset(0);
    std::vector<ss::future<cloud_storage::upload_result>> flist;
    std::vector<cloud_storage::manifest::segment_meta> meta;
    std::vector<ss::sstring> names;
    std::vector<model::offset> deltas;
    for (size_t i = 0; i < _concurrency; i++) {
        vlog(
          archival_log.debug,
          "{} Uploading next candidates for {}, trying offset {}",
          parent(),
          _ntp,
          offset);
        auto upload = _policy.get_next_candidate(offset, lm);
        if (upload.source.get() == nullptr) {
            // No candidate at/after 'offset' — nothing more to upload.
            vlog(
              archival_log.debug,
              "{} Uploading next candidates for {}, ...skip",
              parent(),
              _ntp);
            break;
        }
        if (_manifest.contains(upload.exposed_name)) {
            // This shouldn't happen normally and indicates an error (e.g.
            // manifest doesn't match the actual data because it was uploaded
            // by a different cluster or altered). We can just skip the
            // segment and continue after its committed offset.
            vlog(
              archival_log.warn,
              "{} Uploading next candidates for {}, attempt to re-upload {}",
              parent(),
              _ntp,
              upload);
            // 'existing' (was 'meta') — renamed so it no longer shadows the
            // segment_meta vector declared above.
            const auto& existing = _manifest.get(upload.exposed_name);
            offset = existing->committed_offset + model::offset(1);
            continue;
        }
        // Query offsets once and reuse; the original called offsets()
        // three times for the same values.
        const auto offsets = upload.source->offsets();
        const auto committed = offsets.committed_offset;
        const auto base = offsets.base_offset;
        offset = committed + model::offset(1);
        deltas.push_back(committed - base);
        flist.emplace_back(upload_segment(upload, parent));
        cloud_storage::manifest::segment_meta m{
          .is_compacted = upload.source->is_compacted_segment(),
          .size_bytes = upload.content_length,
          .base_offset = upload.starting_offset,
          .committed_offset = committed,
        };
        meta.emplace_back(m);
        names.emplace_back(upload.exposed_name);
    }
    if (flist.empty()) {
        vlog(
          archival_log.debug,
          "{} Uploading next candidates for {}, no uploads started ...skip",
          parent(),
          _ntp);
        co_return total;
    }
    auto results = co_await ss::when_all_succeed(begin(flist), end(flist));
    total.num_succeded = std::count(
      begin(results), end(results), cloud_storage::upload_result::success);
    total.num_failed = flist.size() - total.num_succeded;
    // Record only the contiguous prefix of successful uploads; stopping at
    // the first failure keeps the manifest gap-free. NOTE(review):
    // successes past the first failure still count toward 'num_succeded'
    // even though they are not added to the manifest here.
    for (size_t i = 0; i < results.size(); i++) {
        if (results[i] != cloud_storage::upload_result::success) {
            break;
        }
        _probe.uploaded(deltas[i]);
        _manifest.add(segment_name(names[i]), meta[i]);
    }
    if (total.num_succeded != 0) {
        vlog(
          archival_log.debug,
          "{} Uploading next candidates for {}, re-uploading manifest file",
          parent(),
          _ntp);
        auto res = co_await upload_manifest(parent);
        if (res != cloud_storage::upload_result::success) {
            // Best effort: a failed manifest upload is only logged; the
            // manifest is re-uploaded after any later successful batch.
            vlog(
              archival_log.debug,
              "{} Manifest upload for {} failed",
              parent(),
              _ntp);
        }
        _last_upload_time = ss::lowres_clock::now();
    }
    co_return total;
}

} // namespace archival
