use hdfs_native_object_store::HdfsObjectStore;
use object_store::path::Path;
use object_store::{ObjectStore, PutPayload, WriteMultipart};

/// Demo entry point: writes a small object with a single `put`, then a
/// larger object via a multipart upload, against an HDFS-backed store.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // SAFETY: `set_var` is unsafe (edition 2024) because concurrent env
    // reads are UB on some platforms. NOTE(review): under `#[tokio::main]`
    // the runtime's worker threads already exist when this runs — confirm
    // nothing reads the environment concurrently at this point.
    unsafe {
        std::env::set_var("HADOOP_USER_NAME", "vision");
    }

    let store = HdfsObjectStore::with_url("hdfs://cluster1:8020")?;

    // Single-shot put of a small static payload. The pre-delete is
    // best-effort: the object may not exist yet, so its error is ignored.
    const SMALL: &str = "data/test/small_file";
    let _ = delete(&store, SMALL).await;
    store
        .put(&Path::from(SMALL), PutPayload::from_static(b"hello"))
        .await?;

    // Multipart upload of a larger object, streamed as many small writes.
    const LARGE: &str = "data/test/large_file";
    let _ = delete(&store, LARGE).await;
    let upload = store.put_multipart(&Path::from(LARGE)).await?;
    let mut writer = WriteMultipart::new(upload);
    for i in 0..1000 {
        let msg = format!("{i} hello\n");
        writer.write(msg.as_bytes());
    }
    // `finish` flushes buffered parts and completes the multipart upload;
    // skipping it would leave the object uncommitted.
    writer.finish().await?;

    Ok(())
}

/// Deletes `object` (an object-store path string) from `store`.
///
/// Errors from the underlying store — e.g. the object not existing — are
/// propagated; callers that treat deletion as best-effort can ignore them.
async fn delete(store: &HdfsObjectStore, object: &str) -> anyhow::Result<()> {
    Ok(store.delete(&Path::from(object)).await?)
}
