/*!
 * gfs client module
 *
 */
use crate::{Router, *};
use anyhow::{anyhow, bail};
use axum::{body::Bytes, http::StatusCode};
use futures::{Stream, TryStreamExt};
use reqwest::header::HeaderMap;
use std::borrow::Cow;
use std::io;
use std::io::Write;
use std::path::PathBuf;
use tokio;
use tokio::io::AsyncReadExt;
use tokio::{fs::File, io::BufWriter};
use tokio_util::io::StreamReader;
use tower::BoxError;

#[cfg(feature = "rotatelog")]
use log::{debug, error, info, warn};
#[cfg(feature = "tracelog")]
use tracing::{debug, error, info};
/**
 *  Write a `reqwest::multipart::Part` to gfs as a file.
 *  After the server receives a user-submitted form file, it can wrap it in a
 *  `Part` directly and write it to gfs.
 *  gfs_writer_url = router.get_writer_url(router.cur_dynasty, ihash)?;
 *  e.g. gfs_writer_url = "http://localhost:9996/form/file";
 *  `file_name_only` must be a bare file name (the last path element only);
 *  otherwise the server treats it as illegal and refuses the write.
 */

pub(crate) async fn write_to_gfs_with_part(
    gfs_writer_url: &str,
    file_name_only: &str,
    part: reqwest::multipart::Part,
) -> Result<GfsResponse> {
    if !filename_is_valid(file_name_only) {
        // BUG FIX: the message previously said "is valid!" on the failure path.
        bail!("file_name_only is invalid!")
    }

    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Authorization", format!("Bearer {}", AUTH_TOKEN).parse()?);
    let fname_form = String::from(file_name_only);
    // The part MUST have been built with a non-empty `.file_name(...)`,
    // otherwise the upload fails on the server side.

    let form = reqwest::multipart::Form::new()
        .part("file", part)
        .text("file_type", "stream")
        .text("file_name", fname_form);

    let r = client
        .post(gfs_writer_url)
        .headers(headers)
        .multipart(form)
        .send()
        .await?
        .json::<Vec<GfsResponse>>()
        .await?;

    // The server answers with a list; the first entry describes this upload.
    // `into_iter().next()` avoids the index + clone of the old `r[0].clone()`.
    r.into_iter().next().ok_or_else(|| anyhow!("send file failed!"))
}

/**
 *  Write a `reqwest::multipart::Part` to gfs.
 *  Unlike `write_to_gfs_with_part`, the writer URL is resolved from `router`,
 *  keeping the routing transparent to the caller.
 *  `file_name_only` must be a bare file name (the last path element only);
 *  otherwise the server treats it as illegal and refuses the write.
 */
pub async fn gfsc_write_with_part(
    router: &GfsRouter,
    file_name_only: &str,
    part: reqwest::multipart::Part,
) -> Result<GfsResponse> {
    // Hash the name to select the target node, then delegate the upload.
    let url = router.get_writer_url(router.cur_dynasty, string_hash256(file_name_only))?;
    write_to_gfs_with_part(url.as_str(), file_name_only, part).await
}

/**
write a local file to gfs, file_name is full_path_name;
 * 解析file_name,获取获取文件名后会根据router，提交给对应的Node writer
 */

pub async fn gfsc_write_file(router: &GfsRouter, file_name: &str) -> Result<GfsResponse> {
    let filename = PathBuf::from(file_name);
    if !filename.exists() {
        bail!("no such file");
    }

    let only_fname = filename
        .file_name()
        .unwrap_or_default()
        .to_str()
        .unwrap_or_default();

    let file_byte = std::fs::read(PathBuf::from(file_name))?;
    // 一定要有file_name方法，且参数不能为空，否则数据上传失败
    let file_name = only_fname.to_string(); //filename.file_name().unwrap_or_default().to_string_lossy().to_string();
    let part = reqwest::multipart::Part::bytes(Cow::from(file_byte)).file_name(file_name);
    gfsc_write_with_part(router, only_fname, part).await
}

/**
Clear the content of a specific record-type file (one that is not an
operator computation record).
 - Like `gfsc_record_clear`, but with fewer required input parameters.
 */
pub async fn gfsc_file_clear(
    router: &GfsRouter,
    ihash: i32,
    idx: i32,
    file_name: &str,
) -> Result<()> {
    // Resolve the master writer of the node owning `ihash`, then issue a
    // clear command with an empty body and no task association.
    let writer = router.get_writer(router.cur_dynasty, ihash)?;
    let empty_body = bytes::Bytes::new();
    gfsc_server_record(&writer, ihash, idx, file_name, RECORD_CMD_CLEAR, "null", -1, empty_body)
        .await
}

/**
Clear the content of the record-type file associated with a specific operator.
 - The system does not sync this; `gfsc_special_sync` cannot sync it either
   (the current sync strategy needs adjustment!! Today a slave that reads an
   empty value from the master simply gives up writing).
 */
pub async fn gfsc_record_clear(
    router: &GfsRouter,
    ihash: i32,
    idx: i32,
    operator_name: &str,
) -> Result<()> {
    // Derive the operator's parallel-record file name, then issue a clear
    // command against the master writer of the owning node.
    let record_file = gfsc_parallel_record_name(operator_name);
    let writer = router.get_writer(router.cur_dynasty, ihash)?;
    let empty_body = bytes::Bytes::new();
    gfsc_server_record(
        &writer,
        ihash,
        idx,
        &record_file,
        RECORD_CMD_CLEAR,
        operator_name,
        -1,
        empty_body,
    )
    .await
}

/**
Write a new record to (or clear) a record-type file on a specific Node.
 - Thin wrapper around `gfsc_server_record`; it writes to the master. Combined
   with `gfsc_special_sync`, the data can be replicated to every server of the
   node for backup purposes.
 - After calling this, the caller MUST invoke `gfsc_special_sync` to propagate
   the change to the whole node.
 - For a plain record write, `taskname` can be ignored; for an operator
   computation record it should be the operator's name.
 - `command` supports clear, override, or append.
 */
pub async fn gfsc_record(
    router: &GfsRouter,
    ihash: i32,
    idx: i32,
    file_name: &str,
    command: &str,
    taskname: &str,
    ikey: i32,
    body: bytes::Bytes,
) -> Result<()> {
    // Resolve the master writer of the node that owns `ihash` and delegate.
    let master = router.get_writer(router.cur_dynasty, ihash)?;
    gfsc_server_record(&master, ihash, idx, file_name, command, taskname, ikey, body).await
}

/**
For Nodes configured with MQ, send a sync instruction for a specific file
through MQ so the file on the master is quickly replicated to the node's
slaves.
 - Archive-class files do not need this; if such a file was already synced,
   re-running the sync makes the slave hit a record error (stat id conflict).
 - Record-class files have no stat constraint, respond normally, and complete
   the file sync.
 */
pub async fn gfsc_special_sync(
    router: &GfsRouter,
    ihash: i32,
    idx: i32,
    file_name: &str
) -> Result<()> {
    let writer = router.get_writer(router.cur_dynasty, ihash)?;
    let token = new_token();
    let gfs_sync_url = format!(
        "{}/node/sync?token={}&ihash={}&idx={}&name={}",
        writer,
        token,
        ihash,
        idx,
        urlencoding::encode(file_name)
    );

    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    // NOTE(review): this sends the literal string "token" as the bearer value
    // (unlike write_to_gfs_with_part, which uses AUTH_TOKEN). Presumably the
    // `token` query parameter carries the real credential — confirm.
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);
    let r = client
        .get(gfs_sync_url)
        .headers(headers)
        .send()
        .await?
        .text()
        .await?;
    if r.eq("OK") || r.eq("ok") {
        return Ok(());
    }
    // Surface the server reply instead of a fixed, uninformative message.
    bail!("gfs special sync failed, server replied: {}", r)
}

/**
Write a new record to (or clear) a record-type file on a specific server.
 - The server can be any member of the gfs, including a slave, but the caller
   must know exactly which server is being written to.
 - For a plain record write, `taskname` can be ignored; for an operator
   computation record it should be the operator's name.
 - `command` supports clear, override, or append.
 - Prefer `gfsc_record`: it writes to the master and, combined with
   `gfsc_special_sync`, replicates to the whole node for backup purposes.
 */
pub async fn gfsc_server_record(
    writer: &str,
    ihash: i32,
    idx: i32,
    file_name: &str,
    command: &str,
    taskname: &str,
    ikey: i32,
    body: bytes::Bytes,
) -> Result<()> {
    let token = new_token();
    let gfs_record_url = format!(
        "{}/record/{}?token={}&ihash={}&idx={}&name={}&ikey={}&taskname={}",
        writer,
        command,
        token,
        ihash,
        idx,
        urlencoding::encode(file_name),
        ikey,
        // BUG FIX: `taskname` is caller-provided text embedded in a query
        // string, so it must be percent-encoded like `file_name` above.
        urlencoding::encode(taskname)
    );

    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    // NOTE(review): sends the literal "token" as bearer value; the `token`
    // query parameter presumably carries the real credential — confirm.
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);
    let r = client
        .post(gfs_record_url)
        .headers(headers)
        .body(body)
        .send()
        .await?
        .text()
        .await?;
    if r.eq("OK") || r.eq("ok") {
        return Ok(());
    }
    // Include the command and server reply so failures are diagnosable.
    bail!("record '{}' failed, server replied: {}", command, r)
}

/// Blocking twin of `gfsc_server_record`: write a new record to (or clear) a
/// record-type file on a specific server using `reqwest::blocking`.
/// Must NOT be called from inside an async runtime thread.
pub fn gfsc_server_record_block(
    writer: &str,
    ihash: i32,
    idx: i32,
    file_name: &str,
    command: &str,
    taskname: &str,
    ikey: i32,
    body: bytes::Bytes,
) -> Result<()> {
    let token = new_token();
    let gfs_record_url = format!(
        "{}/record/{}?token={}&ihash={}&idx={}&name={}&ikey={}&taskname={}",
        writer,
        command,
        token,
        ihash,
        idx,
        urlencoding::encode(file_name),
        ikey,
        // BUG FIX: `taskname` is caller-provided text embedded in a query
        // string, so it must be percent-encoded like `file_name` above.
        urlencoding::encode(taskname)
    );

    // Blocking client: this function is meant for non-async call sites.
    let client = reqwest::blocking::Client::new();

    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    // NOTE(review): sends the literal "token" as bearer value; the `token`
    // query parameter presumably carries the real credential — confirm.
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);
    let r = client
        .post(gfs_record_url)
        .headers(headers)
        .body(body)
        .send()?
        .text()?;
    if r.eq("OK") || r.eq("ok") {
        return Ok(());
    }
    // Include the command and server reply so failures are diagnosable.
    bail!("record '{}' failed, server replied: {}", command, r)
}

/**
Copy a file from gfs to the local file system.
 * Resolves a download URL from `router` by hashing `fname`, then saves the
 * body as `save_path`/`save_name`.
 */
pub async fn gfsc_copy_file(
    router: &GfsRouter,
    dynasty_id: usize,
    idx: i32,
    fname: &str,
    save_path: &PathBuf,
    save_name: &str,
) -> Result<()> {
    if !filename_is_valid(fname) {
        // BUG FIX: the message previously said "is valid!" on the failure path.
        bail!("fname is invalid!")
    }
    let ihash = string_hash256(fname);
    let reader_download_url = router.get_download_url(dynasty_id, idx, ihash, fname)?;
    copy_as_file(reader_download_url.as_str(), save_path, save_name).await
}

/**
Copy a file from the gfs master to the local machine (a slave).
 * Intended to be used only by slaves during synchronization.
 */
pub async fn gfsc_sync_from_master(
    master_download_url: &str,
    fname: &str,
    save_path: &PathBuf,
    save_name: &str,
) -> Result<()> {
    if !filename_is_valid(fname) {
        // BUG FIX: the message previously said "is valid!" on the failure path.
        bail!("fname is invalid!")
    }
    copy_as_file(master_download_url, save_path, save_name).await
}

/**
从指定的gfs_download_url读取文档，保存到save_path路径下，保存文件名为save_name
 * let gfs_download_url = router.get_download_url(dynasty, idx, ihash, fname)?;（任意服务器）
 * let master_download_url = router.get_master_download_url(dynasty, idx, ihash, fname)?;（特定master）
 * 这里的保存文件名，可以任意自定义，不需要维持原名（gfs_download_url已经确定了文件名）
 * 这里增加一个保存路径，是因为gfs slave同步需要
 */
async fn copy_as_file(download_url: &str, save_path: &PathBuf, save_name: &str) -> Result<()> {
    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);

    let r = client
        .get(download_url)
        .headers(headers)
        .send()
        .await?
        .bytes()
        .await?;

    if r.len() <= 0 {
        return Result::Err(anyhow!("read error"));
    }

    if !save_path.exists() {
        std::fs::create_dir_all(save_path.as_path())?;
    }
    // 这里也可能可以尝试tokio::fs::API,异步写文件。
    let pathfile = save_path.join(save_name);
    let mut file = std::fs::File::create(pathfile)?;
    file.write_all(&r.to_vec())?;
    file.flush()?;
    return Ok(());
}

/**
Read a text file from gfs.
 * Callers may first check text-preview support with `is_preview_file(ext)`.
 * gfs_download_url = router.get_download_url(router.cur_dynasty, idx, filename)?;
 */
pub async fn gfsc_read_text(
    router: &GfsRouter,
    dynasty: usize,
    idx: i32,
    ihash: i32,
    fname: &str, /* gfs_read_url: &str */
) -> Result<String> {
    // `fname` is already a &str; the old `&fname` created a redundant `&&str`.
    let read_url = router.get_reader_url(dynasty, idx, ihash, fname)?;
    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);

    let r = client
        .get(read_url)
        .headers(headers)
        .send()
        .await?
        .text()
        .await?;
    Ok(r)
}
/**
 * Read the Stat records of a given server:
 * `select * from stat where id>? order by asc limit ?`.
 * Usually targets a node's writer to feed slave synchronization, but it is
 * not limited to writers — a reader/slave works too.
 * gfs_server = router.get_writer(router.cur_dynasty, filename)?;
 *
 */
pub async fn gfsc_get_stats(gfs_server: &str, from: i32, limit: u32) -> Result<Vec<Stat>> {
    // Build the stats endpoint URL first, then issue a single GET.
    let stats_url = format!(
        "{}/node/stats?from={}&limit={}&token={}",
        gfs_server,
        from,
        limit,
        new_token()
    );

    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);

    let stats = reqwest::Client::new()
        .get(stats_url)
        .headers(headers)
        .send()
        .await?
        .json::<Vec<Stat>>()
        .await?;
    Ok(stats)
}
/**
Read the file lists (up to `limit` entries starting at `from`) of the given
dynasty.
 - Each Node counts its idx independently and auto-increments, so the result
   set is (number of Nodes) * limit entries.
 - The data comes from reading each node's stat table.
 */
pub async fn gfsc_get_files(
    router: &GfsRouter,
    dynasty_id: usize,
    from: i32,
    limit: u32,
) -> Result<Vec<Stat>> {
    let readers = router.get_dynasty_readers(dynasty_id)?;
    let mut all_stats: Vec<Stat> = Vec::new();
    // Query every reader of the dynasty and concatenate the results.
    for (_, reader) in readers.iter() {
        let stats = gfsc_get_stats(reader, from, limit).await?;
        all_stats.extend(stats);
    }
    Ok(all_stats)
}

/**
获取指定dynasty中特定Node的ihash下的文件列表，返回 Stat Vec
 - 数据来自stat表，而非目录遍历。
 -
 */
pub async fn gfsc_dir(
    router: &GfsRouter,
    dynasty_id: usize,
    ihash: i32,
    from: i32,
    limit: u32,
) -> Result<Vec<Stat>> {
    let reader = router.get_reader(dynasty_id, ihash)?;

    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);

    let token = new_token();
    let get_dir_url = format!(
        "{}/dir?token={}&ihash={}&idx={}&limit={}",
        reader, token, ihash, from, limit
    );
    println!("{:#?}", get_dir_url);

    let r: Vec<Stat> = client
        .get(get_dir_url)
        .headers(headers)
        .send()
        .await?
        .json()
        .await?;
    return Ok(r);
}

/**
Save a `Stream` to a gfs
 * write to gfs with write_server.
 *
 */
pub async fn gfsc_stream_to_gfs<S, E>(
    router: &GfsRouter,
    fname: &str,
    stream: S,
) -> std::result::Result<GfsResponse, (StatusCode, String)>
where
    S: Stream<Item = std::result::Result<Bytes, E>>,
    E: Into<BoxError>,
{
    let ihash = string_hash256(fname);
    let root = PathBuf::from("./needn't real path");
    let writer_url = router
        .get_writer_url(router.cur_dynasty, ihash)
        .unwrap_or_default();
    return stream_to_file(
        writer_url.as_str(),
        &root,
        fname,
        STREAM_WRITE_TO_GFS,
        stream,
    )
    .await;
}

/**
Extract stream data (e.g. an axum upload body) to a file.
 - fidx > 0: write into this host's local gfs file system.
 - fidx == 0: not local; this process acts only as a proxy and forwards the
   body to the remote gfs writer at `write_server`.
 */
pub async fn stream_to_file<S, E>(
    write_server: &str,
    gfs_root: &PathBuf,
    fname: &str,
    fidx: i32,
    stream: S,
) -> std::result::Result<GfsResponse, (StatusCode, String)>
where
    S: Stream<Item = std::result::Result<Bytes, E>>,
    E: Into<BoxError>,
{
    // Reject anything that is not a single, plain file-name component.
    if !filename_is_valid(fname) {
        error!(
            "Inalid filename, it must be consists of exactly one normal! here is: {:#?}",
            fname
        );
        return Err((StatusCode::BAD_REQUEST, "Invalid filename".to_owned()));
    }

    // The whole body runs in an inner async block so every `io::Error` raised
    // by `?` funnels into the single INTERNAL_SERVER_ERROR mapping at the end.
    async {
        // Convert the stream into an `AsyncRead`.
        let body_with_io_error = stream.map_err(|err| io::Error::new(io::ErrorKind::Other, err));
        let body_reader = StreamReader::new(body_with_io_error);
        futures::pin_mut!(body_reader);
        let mut retf = GfsResponse::default(); // filled in by whichever branch succeeds below
                                               // Create the file. `File` implements `AsyncWrite`.
        if fidx > 0 {
            // gfs writer branch: stream straight to disk under the hashed dir.
            let ihash = gfs_filename_hash(fname);
            let workpath = gfs_hashcode_to_path(gfs_root, ihash, fidx);
            if !workpath.exists() {
                std::fs::create_dir_all(workpath.as_path())?;
            }
            let pathfile = workpath.join(fname);
            let mut file = BufWriter::new(File::create(pathfile).await?);
            // Copy the body into the file.
            let isize = tokio::io::copy(&mut body_reader, &mut file).await?;
            debug!("copy done, write size = {}", isize);
            retf.name = fname.to_string();
            retf.idx = fidx;
            retf.ihash = ihash;
        } else {
            // gfs proxy branch: buffer the whole body, then re-upload it to
            // the remote gfs writer as a multipart form.
            let mut buffer: Vec<u8> = Vec::new();
            let bn = body_reader.read_to_end(&mut buffer).await?;
            // NOTE(review): `debug!(?bn)` is tracing field syntax; it will not
            // compile under the `rotatelog` (log-crate) feature — confirm.
            debug!(?bn);
            // The part MUST carry a non-empty file_name, or the upload fails.
            let part = reqwest::multipart::Part::bytes(Cow::from(buffer[..bn].to_vec()))
                .file_name(String::from(fname));
            // Upload errors are deliberately swallowed here: `retf` then keeps
            // its default value and is returned as-is (best-effort proxy).
            if let std::result::Result::Ok(gr) =
                write_to_gfs_with_part(write_server, fname, part).await
            {
                retf = gr;
            }
        }
        std::result::Result::Ok::<_, io::Error>(retf)
    }
    .await
    .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))
}

pub(crate) async fn gfsc_file_exist(
    server: &str,
    ihash: i32,
    idx: i32,
    file_name: &str,
) -> Result<()> {
    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
    headers.insert("Authorization", format!("Bearer {}", "token").parse()?);

    let token = new_token();
    let get_dir_url = format!(
        "{}/file/exist?token={}&ihash={}&idx={}&fname={}",
        server, token, ihash, idx, file_name
    );

    let r = client
        .get(get_dir_url)
        .headers(headers)
        .send()
        .await?
        .text()
        .await?;
    if r.eq("OK") || r.eq("ok") {
        return Ok(());
    }
    bail!("not exist")
}
