// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Futures provides the futures generated by [`Operator`]
//!
//! By using futures, users can add more options for operation.

use chrono::{DateTime, Utc};
use futures::Future;
use std::collections::HashMap;
use std::future::IntoFuture;
use std::ops::RangeBounds;
use std::time::Duration;

use crate::raw::*;
use crate::*;

/// OperatorFuture is the future generated by [`Operator`].
///
/// The future will consume all the input to generate a future.
///
/// # NOTES
///
/// This struct is by design to keep in crate. We don't want
/// users to use this struct directly.
pub struct OperatorFuture<I, O, F: Future<Output = Result<O>>> {
    /// The accessor to the underlying object storage
    acc: Accessor,
    /// The path of the target file or directory this operation acts on.
    path: String,
    /// The operation-specific input args (e.g. `OpStat`, `(OpWrite, Duration)`),
    /// accumulated by the builder-style methods before the future is awaited.
    args: I,
    /// The function which will move all the args and return a static future.
    /// Stored as a plain `fn` pointer so the struct itself stays zero-capture.
    f: fn(Accessor, String, I) -> F,
}

impl<I, O, F: Future<Output = Result<O>>> OperatorFuture<I, O, F> {
    /// Build a new [`OperatorFuture`] from its parts.
    ///
    /// # NOTES
    ///
    /// This struct is by design to keep in crate. We don't want
    /// users to use this struct directly.
    pub(crate) fn new(
        inner: Accessor,
        path: String,
        args: I,
        f: fn(Accessor, String, I) -> F,
    ) -> Self {
        Self {
            acc: inner,
            path,
            args,
            f,
        }
    }
}

impl<I, O, F: Future<Output = Result<O>>> OperatorFuture<I, O, F> {
    /// Change the operation's args.
    fn map(mut self, f: impl FnOnce(I) -> I) -> Self {
        self.args = f(self.args);
        self
    }
}

impl<I, O, F> IntoFuture for OperatorFuture<I, O, F>
where
    F: Future<Output = Result<O>>,
{
    type Output = Result<O>;
    type IntoFuture = F;

    /// Consume every stored part and hand them to the stored function,
    /// producing the final future to be awaited.
    fn into_future(self) -> Self::IntoFuture {
        let Self { acc, path, args, f } = self;
        f(acc, path, args)
    }
}

/// Future that generated by [`Operator::stat_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureStat<F> = OperatorFuture<OpStat, Metadata, F>;

impl<F: Future<Output = Result<Metadata>>> FutureStat<F> {
    /// Set the If-Match for this operation.
    pub fn if_match(self, v: &str) -> Self {
        self.map(|op| op.with_if_match(v))
    }

    /// Set the If-None-Match for this operation.
    pub fn if_none_match(self, v: &str) -> Self {
        self.map(|op| op.with_if_none_match(v))
    }

    /// Set the version for this operation.
    pub fn version(self, v: &str) -> Self {
        self.map(|op| op.with_version(v))
    }
}

/// Future that generated by [`Operator::presign_stat_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FuturePresignStat<F> = OperatorFuture<(OpStat, Duration), PresignedRequest, F>;

impl<F: Future<Output = Result<PresignedRequest>>> FuturePresignStat<F> {
    /// Sets the content-disposition header that should be sent back by the remote read operation.
    pub fn override_content_disposition(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_override_content_disposition(v), expire))
    }

    /// Sets the cache-control header that should be sent back by the remote read operation.
    pub fn override_cache_control(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_override_cache_control(v), expire))
    }

    /// Sets the content-type header that should be sent back by the remote read operation.
    pub fn override_content_type(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_override_content_type(v), expire))
    }

    /// Set the If-Match of the option.
    pub fn if_match(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_if_match(v), expire))
    }

    /// Set the If-None-Match of the option.
    pub fn if_none_match(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_if_none_match(v), expire))
    }
}

/// Future that generated by [`Operator::presign_read_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FuturePresignRead<F> = OperatorFuture<(OpRead, Duration), PresignedRequest, F>;

impl<F: Future<Output = Result<PresignedRequest>>> FuturePresignRead<F> {
    /// Sets the content-disposition header that should be sent back by the remote read operation.
    pub fn override_content_disposition(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_override_content_disposition(v), expire))
    }

    /// Sets the cache-control header that should be sent back by the remote read operation.
    pub fn override_cache_control(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_override_cache_control(v), expire))
    }

    /// Sets the content-type header that should be sent back by the remote read operation.
    pub fn override_content_type(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_override_content_type(v), expire))
    }

    /// Set the If-Match of the option.
    pub fn if_match(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_if_match(v), expire))
    }

    /// Set the If-None-Match of the option.
    pub fn if_none_match(self, v: &str) -> Self {
        self.map(|(op, expire)| (op.with_if_none_match(v), expire))
    }
}

/// Future that generated by [`Operator::presign_write_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FuturePresignWrite<F> = OperatorFuture<(OpWrite, Duration), PresignedRequest, F>;

impl<F: Future<Output = Result<PresignedRequest>>> FuturePresignWrite<F> {
    /// Set the Content-Type header of the option.
    pub fn content_type(self, v: &str) -> Self {
        self.map(|(args, dur)| (args.with_content_type(v), dur))
    }

    /// Set the Content-Disposition header of the option.
    pub fn content_disposition(self, v: &str) -> Self {
        self.map(|(args, dur)| (args.with_content_disposition(v), dur))
    }

    /// Set the Cache-Control header of the option.
    pub fn cache_control(self, v: &str) -> Self {
        self.map(|(args, dur)| (args.with_cache_control(v), dur))
    }
}

/// Future that generated by [`Operator::read_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureRead<F> = OperatorFuture<(OpRead, OpReader), Buffer, F>;

impl<F: Future<Output = Result<Buffer>>> FutureRead<F> {
    /// Set the executor for this operation.
    pub fn executor(self, executor: Executor) -> Self {
        self.map(|(args, op_reader)| (args.with_executor(executor), op_reader))
    }

    /// Set `range` for this `read` request.
    ///
    /// If we have a file with size `n`.
    ///
    /// - `..` means read bytes in range `[0, n)` of file.
    /// - `0..1024` and `..1024` means read bytes in range `[0, 1024)` of file
    /// - `1024..` means read bytes in range `[1024, n)` of file
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # use futures::TryStreamExt;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let bs = op.read_with("path/to/file").range(0..1024).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn range(self, range: impl RangeBounds<u64>) -> Self {
        self.map(|(args, op_reader)| (args.with_range(range.into()), op_reader))
    }

    /// Set `concurrent` for the reader.
    ///
    /// By default, OpenDAL reads files without concurrency. This is not efficient for cases when
    /// users read large chunks of data. By setting `concurrent`, opendal will read files concurrently
    /// on supported storage services.
    ///
    /// By setting `concurrent`, opendal will fetch chunks concurrently with
    /// the given chunk size.
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # use opendal::Scheme;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let r = op.read_with("path/to/file").concurrent(8).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn concurrent(self, concurrent: usize) -> Self {
        self.map(|(args, op_reader)| (args, op_reader.with_concurrent(concurrent)))
    }

    /// OpenDAL will use services' preferred chunk size by default. Users can set chunk based on their own needs.
    ///
    /// The following example will make opendal read data in 4MiB chunks:
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # use opendal::Scheme;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let r = op.read_with("path/to/file").chunk(4 * 1024 * 1024).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn chunk(self, chunk_size: usize) -> Self {
        self.map(|(args, op_reader)| (args, op_reader.with_chunk(chunk_size)))
    }

    /// Set `version` for this `read` request.
    ///
    /// This feature can be used to retrieve the data of a specified version of the given path.
    ///
    /// If the version doesn't exist, an error with kind [`ErrorKind::NotFound`] will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    ///
    /// # async fn test(op: Operator, version: &str) -> Result<()> {
    /// let mut bs = op.read_with("path/to/file").version(version).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn version(self, v: &str) -> Self {
        self.map(|(args, op_reader)| (args.with_version(v), op_reader))
    }

    /// Set `if_match` for this `read` request.
    ///
    /// This feature can be used to check if the file's `ETag` matches the given `ETag`.
    ///
    /// If file exists and its etag doesn't match, an error with kind [`ErrorKind::ConditionNotMatch`]
    /// will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// # async fn test(op: Operator, etag: &str) -> Result<()> {
    /// let mut metadata = op.read_with("path/to/file").if_match(etag).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_match(self, v: &str) -> Self {
        self.map(|(args, op_reader)| (args.with_if_match(v), op_reader))
    }

    /// Set `if_none_match` for this `read` request.
    ///
    /// This feature can be used to check if the file's `ETag` doesn't match the given `ETag`.
    ///
    /// If file exists and its etag matches, an error with kind [`ErrorKind::ConditionNotMatch`]
    /// will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// # async fn test(op: Operator, etag: &str) -> Result<()> {
    /// let mut metadata = op.read_with("path/to/file").if_none_match(etag).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_none_match(self, v: &str) -> Self {
        self.map(|(args, op_reader)| (args.with_if_none_match(v), op_reader))
    }

    /// Set `if_modified_since` for this `read` request.
    ///
    /// This feature can be used to check if the file has been modified since the given timestamp.
    ///
    /// If file exists and it hasn't been modified since the specified time, an error with kind
    /// [`ErrorKind::ConditionNotMatch`] will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// use chrono::DateTime;
    /// use chrono::Utc;
    /// # async fn test(op: Operator, time: DateTime<Utc>) -> Result<()> {
    /// let mut metadata = op.read_with("path/to/file").if_modified_since(time).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_modified_since(self, v: DateTime<Utc>) -> Self {
        self.map(|(args, op_reader)| (args.with_if_modified_since(v), op_reader))
    }

    /// Set `if_unmodified_since` for this `read` request.
    ///
    /// This feature can be used to check if the file hasn't been modified since the given timestamp.
    ///
    /// If file exists and it has been modified since the specified time, an error with kind
    /// [`ErrorKind::ConditionNotMatch`] will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// use chrono::DateTime;
    /// use chrono::Utc;
    /// # async fn test(op: Operator, time: DateTime<Utc>) -> Result<()> {
    /// let mut metadata = op.read_with("path/to/file").if_unmodified_since(time).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_unmodified_since(self, v: DateTime<Utc>) -> Self {
        self.map(|(args, op_reader)| (args.with_if_unmodified_since(v), op_reader))
    }
}

/// Future that generated by [`Operator::read_with`] or [`Operator::reader_with`].
///
/// Users can add more options by public functions provided by this struct.
///
/// # Notes
///
/// `FutureReader` shares the same `(OpRead, OpReader)` input as [`FutureRead`];
/// the two remain distinct types because their outputs differ (`Reader` vs `Buffer`).
pub type FutureReader<F> = OperatorFuture<(OpRead, OpReader), Reader, F>;

impl<F: Future<Output = Result<Reader>>> FutureReader<F> {
    /// Set `version` for this `reader`.
    ///
    /// This feature can be used to retrieve the data of a specified version of the given path.
    ///
    /// If the version doesn't exist, an error with kind [`ErrorKind::NotFound`] will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    ///
    /// # async fn test(op: Operator, version: &str) -> Result<()> {
    /// let mut r = op.reader_with("path/to/file").version(version).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn version(self, v: &str) -> Self {
        self.map(|(op_read, op_reader)| (op_read.with_version(v), op_reader))
    }

    /// Set `concurrent` for the reader.
    ///
    /// By default, OpenDAL reads files without concurrency. This is not efficient for cases when
    /// users read large chunks of data. By setting `concurrent`, opendal will read files concurrently
    /// on supported storage services.
    ///
    /// By setting `concurrent`, opendal will fetch chunks concurrently with
    /// the given chunk size.
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # use opendal::Scheme;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let r = op.reader_with("path/to/file").concurrent(8).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn concurrent(self, concurrent: usize) -> Self {
        self.map(|(op_read, op_reader)| (op_read, op_reader.with_concurrent(concurrent)))
    }

    /// OpenDAL will use services' preferred chunk size by default. Users can set chunk based on their own needs.
    ///
    /// The following example will make opendal read data in 4MiB chunks:
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # use opendal::Scheme;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let r = op
    ///     .reader_with("path/to/file")
    ///     .chunk(4 * 1024 * 1024)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn chunk(self, chunk_size: usize) -> Self {
        self.map(|(op_read, op_reader)| (op_read, op_reader.with_chunk(chunk_size)))
    }

    /// Controls the optimization strategy for range reads in [`Reader::fetch`].
    ///
    /// When performing range reads, if the gap between two requested ranges is smaller than
    /// the configured `gap` size, OpenDAL will merge these ranges into a single read request
    /// and discard the unrequested data in between. This helps reduce the number of API calls
    /// to remote storage services.
    ///
    /// This optimization is particularly useful when performing multiple small range reads
    /// that are close to each other, as it reduces the overhead of multiple network requests
    /// at the cost of transferring some additional data.
    ///
    /// In this example, if two requested ranges are separated by less than 1MiB,
    /// they will be merged into a single read request:
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # use opendal::Scheme;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let r = op
    ///     .reader_with("path/to/file")
    ///     .chunk(4 * 1024 * 1024)
    ///     .gap(1024 * 1024)  // 1MiB gap
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn gap(self, gap_size: usize) -> Self {
        self.map(|(op_read, op_reader)| (op_read, op_reader.with_gap(gap_size)))
    }

    /// Set `if-match` for this `read` request.
    ///
    /// This feature can be used to check if the file's `ETag` matches the given `ETag`.
    ///
    /// If file exists and its etag doesn't match, an error with kind [`ErrorKind::ConditionNotMatch`]
    /// will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// # async fn test(op: Operator, etag: &str) -> Result<()> {
    /// let mut r = op.reader_with("path/to/file").if_match(etag).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_match(self, etag: &str) -> Self {
        self.map(|(op_read, op_reader)| (op_read.with_if_match(etag), op_reader))
    }

    /// Set `if-none-match` for this `read` request.
    ///
    /// This feature can be used to check if the file's `ETag` doesn't match the given `ETag`.
    ///
    /// If file exists and its etag matches, an error with kind [`ErrorKind::ConditionNotMatch`]
    /// will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// # async fn test(op: Operator, etag: &str) -> Result<()> {
    /// let mut r = op.reader_with("path/to/file").if_none_match(etag).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_none_match(self, etag: &str) -> Self {
        self.map(|(op_read, op_reader)| (op_read.with_if_none_match(etag), op_reader))
    }

    /// Set `if-modified-since` for this `read` request.
    ///
    /// This feature can be used to check if the file has been modified since the given timestamp.
    ///
    /// If file exists and it hasn't been modified since the specified time, an error with kind
    /// [`ErrorKind::ConditionNotMatch`] will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// use chrono::DateTime;
    /// use chrono::Utc;
    /// # async fn test(op: Operator, time: DateTime<Utc>) -> Result<()> {
    /// let mut r = op.reader_with("path/to/file").if_modified_since(time).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_modified_since(self, v: DateTime<Utc>) -> Self {
        self.map(|(op_read, op_reader)| (op_read.with_if_modified_since(v), op_reader))
    }

    /// Set `if-unmodified-since` for this `read` request.
    ///
    /// This feature can be used to check if the file hasn't been modified since the given timestamp.
    ///
    /// If file exists and it has been modified since the specified time, an error with kind
    /// [`ErrorKind::ConditionNotMatch`] will be returned.
    ///
    /// ```
    /// # use opendal::Result;
    /// use opendal::Operator;
    /// use chrono::DateTime;
    /// use chrono::Utc;
    /// # async fn test(op: Operator, time: DateTime<Utc>) -> Result<()> {
    /// let mut r = op.reader_with("path/to/file").if_unmodified_since(time).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_unmodified_since(self, v: DateTime<Utc>) -> Self {
        self.map(|(op_read, op_reader)| (op_read.with_if_unmodified_since(v), op_reader))
    }
}

/// Future that generated by [`Operator::write_with`].
///
/// Users can add more options by public functions provided by this struct.
///
/// The input tuple is `(OpWrite, OpWriter, Buffer)`: the write args, the
/// writer options, and the data to write.
pub type FutureWrite<F> = OperatorFuture<(OpWrite, OpWriter, Buffer), (), F>;

impl<F: Future<Output = Result<()>>> FutureWrite<F> {
    /// Set the executor for this operation.
    pub fn executor(self, executor: Executor) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_executor(executor);
            (op, opts, buf)
        })
    }

    /// Sets append mode for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_can_append`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - Writes replace existing files unless append is enabled
    /// - With `append(true)`:
    ///   - New data is added at the end of the existing file
    ///   - A missing file is created first
    /// - Services without append support return an error
    ///
    /// Use this to add data to existing files instead of replacing them.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op.write_with("path/to/file", vec![0; 4096]).append(true).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn append(self, v: bool) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_append(v);
            (op, opts, buf)
        })
    }

    /// Sets chunk size for buffered writes.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_multi_min_size`] and [`Capability::write_multi_max_size`] for size limits.
    ///
    /// ### Behavior
    ///
    /// - Without an explicit chunk size, OpenDAL picks an optimal one from service capabilities
    /// - With a chunk size set:
    ///   - Data is buffered until a full chunk is collected
    ///   - Each full chunk triggers one API call
    ///   - The final chunk may be smaller than the configured size
    /// - Keep in mind:
    ///   - Some services enforce minimum chunk sizes (e.g. S3's EntityTooSmall error)
    ///   - Smaller chunks mean more API calls and higher costs
    ///   - Larger chunks use more memory but improve performance and reduce costs
    ///
    /// ### Performance Impact
    ///
    /// A well-chosen chunk size can:
    /// - Reduce the number of API calls
    /// - Improve overall throughput
    /// - Lower operation costs
    /// - Better utilize network bandwidth
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Set 8MiB chunk size - data will be sent in one API call at close
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .chunk(8 * 1024 * 1024)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn chunk(self, v: usize) -> Self {
        self.map(|(op, opts, buf)| {
            let opts = opts.with_chunk(v);
            (op, opts, buf)
        })
    }

    /// Sets concurrent write operations for this writer.
    ///
    /// ## Behavior
    ///
    /// - Without this option, OpenDAL writes files sequentially
    /// - With concurrency enabled:
    ///   - Multiple write operations may run in parallel
    ///   - Writes return immediately without waiting when task slots are available
    ///   - Close ensures all pending writes complete, in order
    ///   - Memory usage grows with the concurrency level
    /// - Services without support fall back to sequential writes
    ///
    /// This feature significantly improves performance when:
    /// - Writing large files
    /// - Network latency is high
    /// - The storage service supports concurrent uploads such as multipart uploads
    ///
    /// ## Performance Impact
    ///
    /// A well-chosen concurrency level can:
    /// - Increase write throughput
    /// - Reduce total write time
    /// - Better utilize available bandwidth
    /// - Trade memory for performance
    ///
    /// ## Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Enable concurrent writes with 8 parallel operations at 128B chunk.
    /// let _ = op.write_with("path/to/file", vec![0; 4096]).chunk(128).concurrent(8).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn concurrent(self, v: usize) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_concurrent(v);
            (op, opts, buf)
        })
    }

    /// Sets Cache-Control header for this write operation.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_cache_control`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Cache-Control is stored as system metadata on the target file
    /// - The value must follow the HTTP Cache-Control header format
    /// - When unsupported, the value is ignored
    ///
    /// Use this to control caching behavior for the written content.
    ///
    /// ### Use Cases
    ///
    /// - Setting browser cache duration
    /// - Configuring CDN behavior
    /// - Optimizing content delivery
    /// - Managing cache invalidation
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Cache content for 7 days (604800 seconds)
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .cache_control("max-age=604800")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// ### References
    ///
    /// - [MDN Cache-Control](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control)
    /// - [RFC 7234 Section 5.2](https://tools.ietf.org/html/rfc7234#section-5.2)
    pub fn cache_control(self, v: &str) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_cache_control(v);
            (op, opts, buf)
        })
    }

    /// Sets `Content-Type` header for this write operation.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_content_type`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Content-Type is stored as system metadata on the target file
    /// - The value must be a MIME type (e.g. "text/plain", "image/jpeg")
    /// - When unsupported, the value is ignored
    ///
    /// Use this to declare the media type of the content being written.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Set content type for plain text file
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .content_type("text/plain")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn content_type(self, v: &str) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_content_type(v);
            (op, opts, buf)
        })
    }

    /// Sets Content-Disposition header for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_content_disposition`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Content-Disposition is stored as system metadata on the target file
    /// - The value must follow the HTTP Content-Disposition header format
    /// - Common values include:
    ///   - `inline` - Content displayed within browser
    ///   - `attachment` - Content downloaded as file
    ///   - `attachment; filename="example.jpg"` - Downloaded with specified filename
    /// - When unsupported, the value is ignored
    ///
    /// Use this to control how the content should be displayed or downloaded.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .content_disposition("attachment; filename=\"filename.jpg\"")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn content_disposition(self, v: &str) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_content_disposition(v);
            (op, opts, buf)
        })
    }

    /// Sets Content-Encoding header for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_content_encoding`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Content-Encoding is stored as system metadata on the target file
    /// - The value must follow the HTTP Content-Encoding header format
    /// - Common values include:
    ///   - `gzip` - Content encoded using gzip compression
    ///   - `deflate` - Content encoded using deflate compression
    ///   - `br` - Content encoded using Brotli compression
    ///   - `identity` - No encoding applied (default value)
    /// - When unsupported, the value is ignored
    ///
    /// Use this to declare the encoding applied to the content being written.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .content_encoding("gzip")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn content_encoding(self, v: &str) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_content_encoding(v);
            (op, opts, buf)
        })
    }

    /// Sets If-Match header for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_if_match`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the write only succeeds if the target's ETag matches the given value
    /// - The value must be a valid ETag string
    /// - Common values include:
    ///   - A specific ETag value like `"686897696a7c876b7e"`
    ///   - `*` - Matches any existing resource
    /// - When unsupported, the value is ignored
    ///
    /// This provides conditional writes based on ETag matching, helping prevent
    /// unintended overwrites in concurrent scenarios.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .if_match("\"686897696a7c876b7e\"")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_match(self, s: &str) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_if_match(s);
            (op, opts, buf)
        })
    }

    /// Sets If-None-Match header for this write request.
    ///
    /// Note: Certain services, like `s3`, support `if_not_exists` but not `if_none_match`.
    /// Use `if_not_exists` if you only want to check whether a file exists.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_if_none_match`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the write only succeeds if the target's ETag does NOT match the given value
    /// - The value must be a valid ETag string
    /// - Common values include:
    ///   - A specific ETag value like `"686897696a7c876b7e"`
    ///   - `*` - Matches if the resource does not exist
    /// - When unsupported, the value is ignored
    ///
    /// This provides conditional writes based on ETag non-matching, useful for
    /// avoiding overwrites of existing resources or ensuring unique writes.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// use bytes::Bytes;
    ///
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .if_none_match("\"686897696a7c876b7e\"")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_none_match(self, s: &str) -> Self {
        self.map(|(op, opts, buf)| {
            let op = op.with_if_none_match(s);
            (op, opts, buf)
        })
    }

    /// Requires that this write succeed only when the target does not exist.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_if_not_exists`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the write succeeds only if the target path does
    ///   not exist yet; an error is returned if the target already exists.
    /// - When not supported, the value is ignored.
    ///
    /// This makes it possible to create new resources without overwriting
    /// existing ones — i.e. "create if not exists" semantics.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .if_not_exists(true)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_not_exists(self, b: bool) -> Self {
        self.map(|(args, options, bs)| {
            let args = args.with_if_not_exists(b);
            (args, options, bs)
        })
    }

    /// Attaches user metadata to this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_user_metadata`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the given key-value pairs (both strings) are
    ///   stored with the object during the write and can be read back later.
    /// - Keys are case-insensitive in most services.
    /// - Services may impose limits, e.g. on key length (~1024 bytes),
    ///   value length (~4096 bytes), total metadata size, or allowed
    ///   characters in keys.
    /// - When not supported, the metadata is ignored.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let _ = op
    ///     .write_with("path/to/file", vec![0; 4096])
    ///     .user_metadata([
    ///         ("language".to_string(), "rust".to_string()),
    ///         ("author".to_string(), "OpenDAL".to_string()),
    ///     ])
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn user_metadata(self, data: impl IntoIterator<Item = (String, String)>) -> Self {
        self.map(|(args, options, bs)| {
            let metadata: HashMap<String, String> = data.into_iter().collect();
            (args.with_user_metadata(metadata), options, bs)
        })
    }
}

/// Future generated by [`Operator::writer_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureWriter<F> = OperatorFuture<(OpWrite, OpWriter), Writer, F>;

impl<F: Future<Output = Result<Writer>>> FutureWriter<F> {
    /// Sets the executor for this operation.
    pub fn executor(self, executor: Executor) -> Self {
        self.map(|(args, options)| {
            let args = args.with_executor(executor);
            (args, options)
        })
    }

    /// Sets append mode for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_can_append`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - By default, a write overwrites any existing file.
    /// - With append enabled, new data is added to the end of the existing
    ///   file, and the file is created if it does not exist yet.
    /// - If the service does not support append, an error is returned.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op.writer_with("path/to/file").append(true).await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn append(self, v: bool) -> Self {
        self.map(|(args, options)| {
            let args = args.with_append(v);
            (args, options)
        })
    }

    /// Sets the chunk size for buffered writes.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_multi_min_size`] and
    /// [`Capability::write_multi_max_size`] for size limits.
    ///
    /// ### Behavior
    ///
    /// - By default, OpenDAL picks an optimal chunk size based on the
    ///   service's capabilities.
    /// - With a chunk size set, data is buffered until it reaches that
    ///   size and one API call is issued per chunk; the last chunk may be
    ///   smaller than the chunk size.
    /// - Important considerations:
    ///   - Some services enforce minimum chunk sizes (e.g. S3's
    ///     `EntityTooSmall` error).
    ///   - Smaller chunks increase API calls and costs.
    ///   - Larger chunks increase memory usage but improve performance and
    ///     reduce costs.
    ///
    /// Choosing an appropriate chunk size can reduce API calls, improve
    /// throughput, lower costs, and better utilize network bandwidth.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// // With an 8 MiB chunk, this data is sent in one API call at close.
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .chunk(8 * 1024 * 1024)
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn chunk(self, v: usize) -> Self {
        self.map(|(args, options)| {
            // NOTE: chunking is a writer option, so it lands on `options`,
            // not on the `OpWrite` args.
            let options = options.with_chunk(v);
            (args, options)
        })
    }

    /// Sets the number of concurrent write operations for this writer.
    ///
    /// ### Behavior
    ///
    /// - By default, OpenDAL writes files sequentially.
    /// - With concurrency set:
    ///   - Multiple write operations can execute in parallel.
    ///   - Write operations return immediately without waiting, as long as
    ///     task slots are available.
    ///   - The close operation ensures all writes complete, in order.
    ///   - Memory usage increases with the concurrency level.
    /// - If not supported, falls back to sequential writes.
    ///
    /// This significantly improves performance when writing large files,
    /// when network latency is high, or when the service supports
    /// concurrent uploads such as multipart uploads — trading memory for
    /// throughput and better bandwidth utilization.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Allow up to 8 parallel write operations.
    /// let mut w = op.writer_with("path/to/file").concurrent(8).await?;
    ///
    /// // The first write starts immediately.
    /// w.write(vec![0; 4096]).await?;
    ///
    /// // The second write may run concurrently with the first.
    /// w.write(vec![1; 4096]).await?;
    ///
    /// // Waits until every write has completed successfully, in order.
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn concurrent(self, v: usize) -> Self {
        self.map(|(args, options)| {
            let args = args.with_concurrent(v);
            (args, options)
        })
    }

    /// Sets the Cache-Control header for this write operation.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_cache_control`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Cache-Control is stored as system metadata on the
    ///   target file; the value should follow the HTTP `Cache-Control`
    ///   header format.
    /// - When not supported, the value is ignored.
    ///
    /// Typical uses include setting browser cache duration, configuring
    /// CDN behavior, optimizing content delivery, and managing cache
    /// invalidation.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Cache content for 7 days (604800 seconds).
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .cache_control("max-age=604800")
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// ### References
    ///
    /// - [MDN Cache-Control](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control)
    /// - [RFC 7234 Section 5.2](https://tools.ietf.org/html/rfc7234#section-5.2)
    pub fn cache_control(self, v: &str) -> Self {
        self.map(|(args, options)| {
            let args = args.with_cache_control(v);
            (args, options)
        })
    }

    /// Sets the `Content-Type` header for this write operation.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_content_type`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Content-Type is stored as system metadata on the
    ///   target file; the value should be a MIME type such as
    ///   `text/plain` or `image/jpeg`.
    /// - When not supported, the value is ignored.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// // Mark the file as plain text.
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .content_type("text/plain")
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn content_type(self, v: &str) -> Self {
        self.map(|(args, options)| {
            let args = args.with_content_type(v);
            (args, options)
        })
    }

    /// Sets the Content-Disposition header for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_content_disposition`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Content-Disposition is stored as system metadata
    ///   on the target file; the value should follow the HTTP
    ///   `Content-Disposition` header format. Common values:
    ///   - `inline` — content displayed within the browser.
    ///   - `attachment` — content downloaded as a file.
    ///   - `attachment; filename="example.jpg"` — downloaded with the
    ///     specified filename.
    /// - When not supported, the value is ignored.
    ///
    /// This controls how the content should be displayed or downloaded.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .content_disposition("attachment; filename=\"filename.jpg\"")
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn content_disposition(self, v: &str) -> Self {
        self.map(|(args, options)| {
            let args = args.with_content_disposition(v);
            (args, options)
        })
    }

    /// Sets the Content-Encoding header for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_content_encoding`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, Content-Encoding is stored as system metadata on
    ///   the target file; the value should follow the HTTP
    ///   `Content-Encoding` header format. Common values:
    ///   - `gzip` — content encoded with gzip compression.
    ///   - `deflate` — content encoded with deflate compression.
    ///   - `br` — content encoded with Brotli compression.
    ///   - `identity` — no encoding applied (the default).
    /// - When not supported, the value is ignored.
    ///
    /// This declares which encoding has been applied to the content being
    /// written.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .content_encoding("gzip")
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn content_encoding(self, v: &str) -> Self {
        self.map(|(args, options)| {
            let args = args.with_content_encoding(v);
            (args, options)
        })
    }

    /// Sets an `If-Match` precondition for this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_if_match`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the write succeeds only if the target's ETag
    ///   equals the given value. The value should be a valid ETag string
    ///   such as `"686897696a7c876b7e"`; `*` matches any existing resource.
    /// - When not supported, the value is ignored.
    ///
    /// This enables conditional writes based on ETag matching, which helps
    /// prevent unintended overwrites in concurrent scenarios.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .if_match("\"686897696a7c876b7e\"")
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_match(self, s: &str) -> Self {
        self.map(|(args, options)| {
            let args = args.with_if_match(s);
            (args, options)
        })
    }

    /// Sets an `If-None-Match` precondition for this write request.
    ///
    /// Note: certain services, like `s3`, support `if_not_exists` but not
    /// `if_none_match`. Use `if_not_exists` if you only want to check
    /// whether a file exists.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_if_none_match`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the write succeeds only if the target's ETag does
    ///   NOT equal the given value. The value should be a valid ETag string
    ///   such as `"686897696a7c876b7e"`; `*` matches when the resource does
    ///   not exist.
    /// - When not supported, the value is ignored.
    ///
    /// This enables conditional writes based on ETag non-matching, useful
    /// for avoiding overwrites of existing resources or ensuring unique
    /// writes.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .if_none_match("\"686897696a7c876b7e\"")
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_none_match(self, s: &str) -> Self {
        self.map(|(args, options)| {
            let args = args.with_if_none_match(s);
            (args, options)
        })
    }

    /// Requires that this write succeed only when the target does not exist.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_if_not_exists`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the write succeeds only if the target path does
    ///   not exist yet; an error is returned if the target already exists.
    /// - When not supported, the value is ignored.
    ///
    /// This makes it possible to create new resources without overwriting
    /// existing ones — i.e. "create if not exists" semantics.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .if_not_exists(true)
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.write(vec![1; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn if_not_exists(self, b: bool) -> Self {
        self.map(|(args, options)| {
            let args = args.with_if_not_exists(b);
            (args, options)
        })
    }

    /// Attaches user metadata to this write request.
    ///
    /// ### Capability
    ///
    /// Check [`Capability::write_with_user_metadata`] before using this feature.
    ///
    /// ### Behavior
    ///
    /// - When supported, the given key-value pairs (both strings) are
    ///   stored with the object during the write and can be read back later.
    /// - Keys are case-insensitive in most services.
    /// - Services may impose limits, e.g. on key length (~1024 bytes),
    ///   value length (~4096 bytes), total metadata size, or allowed
    ///   characters in keys.
    /// - When not supported, the metadata is ignored.
    ///
    /// ### Example
    ///
    /// ```
    /// # use opendal::Result;
    /// # use opendal::Operator;
    /// # async fn test(op: Operator) -> Result<()> {
    /// let mut w = op
    ///     .writer_with("path/to/file")
    ///     .user_metadata([
    ///         ("content-type".to_string(), "text/plain".to_string()),
    ///         ("author".to_string(), "OpenDAL".to_string()),
    ///     ])
    ///     .await?;
    /// w.write(vec![0; 4096]).await?;
    /// w.close().await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn user_metadata(self, data: impl IntoIterator<Item = (String, String)>) -> Self {
        self.map(|(args, options)| {
            let metadata: HashMap<String, String> = data.into_iter().collect();
            (args.with_user_metadata(metadata), options)
        })
    }
}

/// Future generated by [`Operator::delete_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureDelete<F> = OperatorFuture<OpDelete, (), F>;

impl<F: Future<Output = Result<()>>> FutureDelete<F> {
    /// Specify which version of the file this delete operation targets.
    pub fn version(self, v: &str) -> Self {
        self.map(|op| op.with_version(v))
    }
}

/// Future generated by [`Operator::deleter_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureDeleter<F> = OperatorFuture<OpDeleter, (), F>;

/// Future generated by [`Operator::list_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureList<F> = OperatorFuture<OpList, Vec<Entry>, F>;

impl<F: Future<Output = Result<Vec<Entry>>>> FutureList<F> {
    /// Set the maximum number of results the underlying service may
    /// return per request.
    ///
    /// This can be used to bound the memory usage of a list operation.
    pub fn limit(self, v: usize) -> Self {
        self.map(|op| op.with_limit(v))
    }

    /// Set the key after which the underlying service should start
    /// listing.
    pub fn start_after(self, v: &str) -> Self {
        self.map(|op| op.with_start_after(v))
    }

    /// Control whether the list operation is recursive.
    ///
    /// - `false`: only entries directly under the given path are listed.
    /// - `true`: every entry whose path starts with the given path is
    ///   listed.
    ///
    /// Defaults to `false`.
    pub fn recursive(self, v: bool) -> Self {
        self.map(|op| op.with_recursive(v))
    }

    /// Control whether object versions should be returned.
    ///
    /// - `false`: object versions are not returned.
    /// - `true`: object versions are returned when the underlying service
    ///   supports object versioning.
    ///
    /// Defaults to `false`.
    #[deprecated(since = "0.51.1", note = "use versions instead")]
    pub fn version(self, v: bool) -> Self {
        self.map(|op| op.with_versions(v))
    }

    /// Control whether the `list` operation should return file versions.
    ///
    /// When `true`, the results include version information for each file,
    /// provided versioning is supported and enabled. When `false`, version
    /// information is omitted from the results.
    ///
    /// Defaults to `false`.
    pub fn versions(self, v: bool) -> Self {
        self.map(|op| op.with_versions(v))
    }

    /// Control whether the `list` operation should include deleted files
    /// (or versions).
    ///
    /// When `true`, entries for files or versions marked as deleted are
    /// included in the results; when `false`, they are excluded. This is
    /// particularly relevant for services supporting soft deletion or
    /// versioning.
    pub fn deleted(self, v: bool) -> Self {
        self.map(|op| op.with_deleted(v))
    }
}

/// Future generated by [`Operator::lister_with`].
///
/// Users can add more options by public functions provided by this struct.
pub type FutureLister<F> = OperatorFuture<OpList, Lister, F>;

impl<F: Future<Output = Result<Lister>>> FutureLister<F> {
    /// Set the maximum number of results the underlying service may
    /// return per request.
    ///
    /// This can be used to bound the memory usage of a list operation.
    pub fn limit(self, v: usize) -> Self {
        self.map(|op| op.with_limit(v))
    }

    /// Set the key after which the underlying service should start
    /// listing.
    pub fn start_after(self, v: &str) -> Self {
        self.map(|op| op.with_start_after(v))
    }

    /// Control whether the list operation is recursive.
    ///
    /// - `false`: only entries directly under the given path are listed.
    /// - `true`: every entry whose path starts with the given path is
    ///   listed.
    ///
    /// Defaults to `false`.
    pub fn recursive(self, v: bool) -> Self {
        self.map(|op| op.with_recursive(v))
    }

    /// Control whether object versions should be returned.
    ///
    /// - `false`: object versions are not returned.
    /// - `true`: object versions are returned when the underlying service
    ///   supports object versioning.
    ///
    /// Defaults to `false`.
    #[deprecated(since = "0.51.1", note = "use versions instead")]
    pub fn version(self, v: bool) -> Self {
        self.map(|op| op.with_versions(v))
    }

    /// Control whether the `list` operation should return file versions.
    ///
    /// When `true`, the results include version information for each file,
    /// provided versioning is supported and enabled. When `false`, version
    /// information is omitted from the results.
    ///
    /// Defaults to `false`.
    pub fn versions(self, v: bool) -> Self {
        self.map(|op| op.with_versions(v))
    }

    /// Control whether the `list` operation should include deleted files
    /// (or versions).
    ///
    /// When `true`, entries for files or versions marked as deleted are
    /// included in the results; when `false`, they are excluded. This is
    /// particularly relevant for services supporting soft deletion or
    /// versioning.
    pub fn deleted(self, v: bool) -> Self {
        self.map(|op| op.with_deleted(v))
    }
}
