#![allow(unused_imports)]
#![allow(dead_code)]

use std::iter::* ;
use std::io::{Read, BufRead, BufReader} ;
use std::fs::{File} ;
use std::collections::{HashSet, HashMap} ;
use std::path::{Path, PathBuf};
use std::time::Duration;
use std::vec::IntoIter ;
use std::net::SocketAddr ;
use std::env::var ;
use std::str;

use std::io ;
use std::net::TcpStream ;
use std::thread::{sleep} ;
/*
use async_std::task::{sleep} ;
use async_std::io;
use async_std::net::{TcpStream} ;
*/
use async_recursion::async_recursion;

use dotenv::dotenv;

use itertools::Itertools;

use rand::random;
use ssh2::{self, Session, Sftp, FileStat, Error};

use futures::executor::block_on ;
/*
use futures::future::{BoxFuture, FutureExt};
*/
use rust_embed::RustEmbed;

use http::Uri ;
use avro_rs::{Schema, from_avro_datum} ;

use futures::{future, select};
use futures::stream::{self, StreamExt};
use futures::future::Future ;

use std::sync::{Arc, Mutex} ;



use self::utils::* ;

/// Compile-time embedding of the `avsc/` directory: the Avro schema files
/// are baked into the binary and read back via `AvscAsset::get(...)`.
#[derive(RustEmbed)]
#[folder = "avsc"]
struct AvscAsset;

/// Round-robin selector over a set of base URLs (one per HDFS namenode).
/// `select` yields the current URL with a fixed path prefix appended;
/// `roll` advances to the next URL after a failover-worthy error.
#[derive(Clone, Debug)]
struct HttpSelector {
    url: Vec<String>,         // candidate base URLs
    index: Arc<Mutex<usize>>, // shared cursor into `url`, advanced by roll()
    prefix: String,           // path suffix appended to every selected URL
}

impl HttpSelector {
    /// Build a selector over `url`; every selected URL gets `prefix` appended.
    fn create(url: Vec<&str>, prefix: &str) -> HttpSelector {
        let owned: Vec<String> = url.iter().map(|u| String::from(*u)).collect();
        HttpSelector {
            url: owned,
            index: Arc::new(Mutex::new(0)),
            prefix: prefix.to_owned(),
        }
    }

    /// Currently selected base URL with the fixed prefix appended.
    fn select(&self) -> String {
        let cursor = self.index.lock().unwrap();
        format!("{}{}", self.url[*cursor], self.prefix)
    }

    /// Advance the cursor to the next URL, wrapping around.
    /// Fails when there is only one URL, i.e. nothing to fail over to.
    fn roll(&self) -> Result<(), String> {
        if self.url.len() <= 1 {
            Err("roll.len <= 1")?
        }
        let mut cursor = self.index.lock().unwrap();
        *cursor = (*cursor + 1) % self.url.len();
        println!("HttpSelector roll... {}", *cursor);
        Ok(())
    }
}


// Recursively walk `path` on the sftp server, pushing (path, stat) for every
// regular file into `collector`. Directories that cannot be read are skipped
// silently (the readdir error is ignored).
fn list_sftp_dir(client: &Sftp, path: &str, collector: &mut Vec<(PathBuf, FileStat)>){
    if let Ok(entries) = client.readdir(path.as_ref()) {
        for (entry_path, entry_stat) in entries {
            if entry_stat.is_dir() {
                // Descend into the subdirectory.
                list_sftp_dir(client, entry_path.to_str().unwrap(), collector);
            } else {
                collector.push((entry_path, entry_stat));
            }
        }
    }
}

// Collect every file under `path` on the sftp server, returned as paths
// relative to `path` (the "<path>/" prefix is stripped). `_offset` is
// currently unused.
async fn walk_sftp_dir(_offset: &Option<(String, String)>, client: &Sftp, path: &str) -> HashSet<String> {
    let mut files = Vec::new();
    list_sftp_dir(client, path, &mut files);
    let skip = path.len() + 1; // length of "<path>/"
    files
        .into_iter()
        .map(|(p, _)| p.to_str().unwrap()[skip..].to_string())
        .collect()
}

/// Recursively enumerate an HDFS directory through the WebHDFS LISTSTATUS op.
///
/// Every file path found under `path` is pushed into `collector`. Directories
/// are recursed into unless `level` has counted down to zero, in which case
/// the directory path itself is pushed instead (callers use this to list only
/// partition directories). On a StandbyException response the selector rolls
/// to the next namenode and the request is retried; FileNotFoundException is
/// treated as an empty directory, not an error.
#[async_recursion]
async fn list_hdfs_dir(webhdfs: &HttpSelector, path: &str, collector: &mut Vec<String>, level: Option<u8>) -> Result<(), String> {
    // Depth budget exhausted: record the directory itself, do not descend.
    if let Some(x) = level { if x <= 0 { 
        collector.push(path.into()) ;
        return Ok(()) 
    }; } ;

    // Retry LISTSTATUS until a non-standby namenode answers.
    let res = loop {
        let url = format!("{}{}?op=LISTSTATUS", webhdfs.select(), path) ;
        let res_current : serde_json::Value = reqwest::get(&url).await.map_err(|x| format!("{:?}", x))?
            .json().await.map_err(|x| format!("{:?}", x))? ;

        if res_current["RemoteException"]["exception"].as_str() == Some("StandbyException") {
            webhdfs.roll()? ;
        } else {
            break res_current  ;
        }
    } ;

    // A missing path just means there is nothing to list.
    if res["RemoteException"]["exception"].as_str() == Some("FileNotFoundException") {
        return Ok(()) ;
    }

    let file_status = res["FileStatuses"]["FileStatus"].as_array().ok_or("FileStatuses.FileStatus array not found!")? ;
    for child in file_status {
        let file_type = child["type"].as_str().ok_or("type field not found!") ? ;
        let file_path = format!("{}/{}", path, child["pathSuffix"].as_str().ok_or("pathSuffix field not found!")?) ;
        if file_type == "DIRECTORY" {
            // Recurse one level deeper; the depth budget (if any) shrinks by one.
            list_hdfs_dir(webhdfs, &file_path, collector, level.map(|x| x-1)).await? ;
        } else {
            collector.push(file_path) ;
        }
    }
    Ok(())
}

// List files under `path` in HDFS (depth-limited by `level`), returned as
// paths relative to `path` (the "<path>/" prefix is stripped).
async fn walk_hdfs_dir(webhdfs: &HttpSelector, path: &str, level: Option<u8>) -> Result<HashSet<String>, String>  {
    let mut files = Vec::new();
    list_hdfs_dir(webhdfs, path, &mut files, level).await?;
    let skip = path.len() + 1; // length of "<path>/"
    Ok(files.into_iter().map(|f| f[skip..].to_string()).collect())
}

// Upload `bs` as a new file at `path` via WebHDFS CREATE, retrying against
// the next namenode whenever the current one reports StandbyException.
async fn create_hdfs_file(webhdfs: &HttpSelector, path: &str, bs: Vec<u8>) -> Result<(), String> {
    let reply: String = loop {
        let url = format!("{}{}?op=CREATE", webhdfs.select(), path);
        let attempt = reqwest::Client::new()
            .put(&url)
            .body(bs.clone())
            .send()
            .await
            .map_err(|e| format!("{:?}", e))?
            .text()
            .await
            .map_err(|e| format!("{:?}", e))?;

        if !attempt.contains("StandbyException") {
            break attempt;
        }
        // Standby namenode answered: fail over and retry.
        webhdfs.roll()?;
    };

    println!("create-res: {:?}", reply);
    Ok(())
}
/// Append `bs` to an existing HDFS file via the WebHDFS REST API.
///
/// Bug fix: this previously issued `PUT ?op=CREATE` — byte-identical to
/// `create_hdfs_file` — so "appending" silently re-created the target
/// instead of extending it. WebHDFS appends are `POST ?op=APPEND`
/// (reqwest follows the namenode's 307 redirect to a datanode, preserving
/// method and body, just as the CREATE path relies on).
/// Rolls to the next namenode on StandbyException, like the other ops.
async fn append_hdfs_file(webhdfs: &HttpSelector, path: &str, bs: Vec<u8>) -> Result<(), String> {
    let res : String = loop {
        let url = format!("{}{}?op=APPEND", webhdfs.select(), path) ;
        let res_current : String = reqwest::Client::new()
            .post(&url)   // APPEND uses POST, not PUT
            .body(bs.clone())
            .send().await.map_err(|x| format!("{:?}", x))?
            .text().await.map_err(|x| format!("{:?}", x))? ;

        if res_current.contains("StandbyException") {
            webhdfs.roll()? ;   // fail over to the next namenode and retry
        } else {
            break res_current ;
        }
    } ;
    
    println!("append-res: {:?}", res) ;
    Ok(())
}


// Create the directory `path` (and parents) in HDFS via WebHDFS MKDIRS.
// Uses a short timeout since MKDIRS is a cheap metadata-only call.
async fn mk_hdfs_dir(webhdfs: &HttpSelector, path: &str) -> Result<(), String> {
    let url = format!("{}{}?op=MKDIRS", webhdfs.select(), path);

    println!("mkdir directory: {}", path);
    println!("url:{}", url);

    let client = reqwest::Client::builder()
        .timeout(Duration::from_millis(500))
        .build()
        .unwrap();

    let reply: serde_json::Value = client
        .put(&url)
        .send()
        .await
        .map_err(|e| format!("{:?}", e))?
        .json()
        .await
        .map_err(|e| format!("{:?}", e))?;

    println!("mkdir-res: {:?}", reply);

    Ok(())
}

/// Uniform length accessor over owned vectors, borrowed vectors and byte
/// slices, so generic code can ask for a length without caring which it got.
pub trait HasLen {
    fn len(&self) -> usize;
}

impl<A> HasLen for Vec<A> {
    // Delegate explicitly to the inherent Vec::len.
    fn len(&self) -> usize {
        Vec::len(self)
    }
}
impl<A> HasLen for &Vec<A> {
    fn len(&self) -> usize {
        Vec::len(*self)
    }
}
impl HasLen for &[u8] {
    fn len(&self) -> usize {
        <[u8]>::len(*self)
    }
}

mod avro {
    use super::* ;
    use avro_rs::* ;
    /// Little-endian base-128 varint: emit 7 bits per byte, continuation
    /// bit (0x80) set on every byte except the last.
    fn encode_variable(mut z: u64, buffer: &mut Vec<u8>) {
        while z > 0x7F {
            buffer.push((0x80 | (z & 0x7F)) as u8);
            z >>= 7;
        }
        buffer.push((z & 0x7F) as u8);
    }
    /// Avro zig-zag long: maps 0,-1,1,-2,… onto 0,1,2,3,… then varint-encodes,
    /// so small-magnitude values (of either sign) stay short.
    pub fn zig_i64(n: i64, buffer: &mut Vec<u8>) {
        let zigzag = ((n << 1) ^ (n >> 63)) as u64;
        encode_variable(zigzag, buffer)
    }
    /// Append an Avro `long` and hand the buffer back for chaining.
    fn append_long(buffer: &mut Vec<u8>, i: i64) -> &Vec<u8>{
        zig_i64(i, buffer);
        buffer
    }

    /// Append Avro `bytes`: zig-zag length prefix followed by the raw bytes.
    fn append_bytes<'a>(buffer: &'a mut Vec<u8>, bs: &[u8]) {
        append_long(buffer, bs.len() as i64);
        buffer.extend_from_slice(bs);
    }
    /// Append an Avro map as a single block — entry count, then each
    /// key/value as `bytes` — terminated by the required zero count byte.
    fn append_meta<'a>(buffer: &'a mut Vec<u8>, meta: &HashMap<&[u8], &[u8]>) {
        append_long(buffer, meta.len() as i64);
        for (k, v) in meta {
            append_bytes(buffer, k);
            append_bytes(buffer, v);
        }
        buffer.push(0u8);
    }
    
    pub fn marker() -> Vec<u8> {
        repeat_with(random).take(16).collect()
    }

    /// Wrap an already-encoded batch of `num` records into an Avro file data
    /// block: record count, then the payload as length-prefixed bytes.
    /// (The `compressed` name survives from a codec step that is currently
    /// a no-op pass-through.)
    pub fn encode_block(batch: Vec<u8>, num: u8) -> Vec<u8> {
        let compressed = batch;
        let mut avro_block = Vec::with_capacity(compressed.len() + 16);
        append_long(&mut avro_block, num as i64);
        append_bytes(&mut avro_block, &compressed);
        avro_block
    }
    /// Avro object-container file header: the "Obj\x01" magic followed by the
    /// metadata map carrying the schema and codec. The sync marker is NOT
    /// included — the caller appends one after each block.
    pub fn header(schema_str: &str, code: &str) -> Vec<u8> {
        let magic: &[u8] = b"Obj\x01";

        let mut metadata = HashMap::with_capacity(2);
        metadata.insert("avro.schema".as_bytes(), schema_str.as_bytes());
        metadata.insert("avro.codec".as_bytes(), code.as_bytes());

        let mut avro_header = Vec::from(magic);
        append_meta(&mut avro_header, &metadata);
        avro_header
    }

    /// Decode `v` against `schema`; when it is an Avro array, explode it into
    /// one re-encoded datum per element. Each result is tagged with the union
    /// branch name when the element schema is a union (`None` otherwise).
    /// Non-array values pass through as a single untagged entry.
    ///
    /// Returns `(branch_name, (encoded_datum, datum_schema))` tuples.
    ///
    /// # Panics
    /// Panics when `v` does not decode under `schema`, when `schema` is not
    /// an array but the decoded value is, or when a union value matches no
    /// branch of the union schema.
    ///
    /// NOTE(review): `find_schema` / `subname` are not part of upstream
    /// avro_rs — presumably a forked/patched crate; verify against Cargo.toml.
    pub fn split_array(schema: &Schema, v: Vec<u8>) -> Vec<(Option<String>, (Vec<u8>, Schema))>{
        let avro_value = from_avro_datum(&schema, &mut v.as_slice(), None).expect("value") ;
        match avro_value {
            types::Value::Array(xs) =>  {
                // Pull the element schema out of the array schema.
                let element_schema_box =  match schema {
                    Schema::Array(ref inner) => inner,
                    _ => {
                        panic!("array schema expected!")
                    }
                } ;
                let element_schema = &*element_schema_box.clone() ;
                xs.into_iter()
                .map(|x| match x {
                    types::Value::Union(v) => {
                        // Union element: resolve the concrete branch schema and
                        // re-encode the inner value against that branch.
                        if let Schema::Union(ref inner) = element_schema {
                            let (_, branch_schema) = inner.find_schema(&v).unwrap() ;
                            (Some(v.subname()), (to_avro_datum(&branch_schema, *v).unwrap(), branch_schema.to_owned()))
                        } else {
                            panic!("union value didn't belong to union schema!")
                        }
                        
                    },
                    // Plain element: re-encode against the element schema, no branch tag.
                    _ => (None, (to_avro_datum(&element_schema, x.clone()).unwrap(), element_schema.clone()))
                })
                .collect()
            },
            // Not an array: hand back the original bytes and schema unchanged.
            _ => vec![(None, (v, schema.to_owned()))]
        }
    }

/*
    pub fn frag_to_container<T>(schema: &Schema, values: T) -> impl Iterator<Item=(Option<String>, Vec<u8>)>
    where T: Iterator<Item=Vec<u8>>  {
        let batch_size = 1024 * 1024 ;

        let marker = avro::marker() ;

        let mut st = HashMap::<Option<String>, (Vec<u8>, u8, Option<Schema>)>::new() ;
        let schema_owned = schema.to_owned() ;
        values.flat_map(move |x| avro::split_array(&schema_owned, x))
            .batching(
                move |it| {
                    while let Some((key, (val, record_schema))) = it.next() {
                        match st.get_mut(&key) {
                            None => {
                                st.insert(key.clone(), (val, 1, Some(record_schema,))) ;
                            }
                            Some(payload) => {
                                let accum_size = payload.0.len() ;
                                let val_size = val.len() ;
                                if val_size + accum_size > batch_size {
                                    let payload_owned = payload.to_owned() ;
                                    *payload = (val, 1, None) ;
                                    return Some(vec![(key, payload_owned)]) ;
                                } else {
                                    let (buf, num, schema) = payload.to_owned() ;
                                    let mut new_buf = Vec::new() ;
                                    new_buf.extend_from_slice(buf.as_slice()) ;
                                    new_buf.extend_from_slice(val.as_slice()) ;
                                    *payload = (new_buf, num + 1, schema) ;
                                }
                            }
                        };
                    } ;
                    
                    if !st.is_empty() {
                        let st_flush = st.clone().into_iter().collect() ;
                        st.clear() ;
                        Some(st_flush)
                    } else {
                        None
                    }
                }
              ) 
            .flatten()
            .flat_map(|(name, (buf, num, info))| {
                if info.is_some() {
                    let current_schema = info.unwrap() ;
                    let canonical_schema = current_schema.canonical_form() ;
                    vec![(name.clone(), avro::header(&canonical_schema, "null")), 
                         (name.clone(), avro::encode_block(buf, num))]
                } else {
                    vec![(name, avro::encode_block(buf, num))]
                }
            })
            .map(move |(key, mut buf)| (key, {buf.extend_from_slice(&marker); buf}) )
    }
*/
}

/*
async fn sync_sftp_hdfs(schema: &Schema, subpath: &str, sftp_client: &Sftp, sftp_prefix: &str, webhdfs: &HttpSelector, hdfs_prefix: &str) -> io::Result<u64> {

    let sftp_path_s = format!("{}/{}", sftp_prefix, subpath) ;
    let sftp_path = Path::new(&sftp_path_s) ;
    let sftp_file = sftp_client.open(&sftp_path)? ;

    let batch_size = 1024 * 1024 * 200 ;

    let mut st = HashMap::<Option<String>, (Vec<u8>, Option<()>)>::new() ;

    let avro_container = 
    avro::frag_to_container(
        &schema, 
        BufReader::with_capacity(1024 * 1024, sftp_file)
            .lines().filter_map(|x| x.ok())
            .filter_map(|x| base64::decode(x).ok())
            .filter_map(|x| zstd::decode_all(x.as_slice()).ok())
    ).batching(
        move |it| {
            while let Some((key, val)) = it.next() {
                match st.get_mut(&key) {
                    None => {
                        st.insert(key, (val, Some(()))) ;
                    }
                    Some(payload) => {
                        let accum_size = payload.0.len() ;
                        let val_size = val.len() ;
                        if val_size + accum_size > batch_size {
                            let payload_owned = payload.to_owned() ;
                            *payload = (val, None) ;
                            return Some(vec![(key, payload_owned)]) ;
                        } else {
                            let info = payload.1.to_owned() ;
                            let mut new_buf = Vec::new() ;
                            new_buf.extend_from_slice(payload.0.as_slice()) ;
                            new_buf.extend_from_slice(val.as_slice()) ;
                            *payload = (new_buf, info) ;
                        }
                        
                    }
                };
            } ;
            
            if !st.is_empty() {
                let st_flush = st.clone().into_iter().collect() ;
                st.clear() ;
                Some(st_flush)
            } else {
                None
            }
        }
    )
    .flatten() ;
    for (name, (xs, info)) in avro_container {
        use itertools::Itertools;
            let (sftp_topic, sftp_dt_part, sftp_file_name) = subpath.splitn(3, "/").collect_tuple().unwrap() ;
            let hdfs_dir = format!( "{}/{}/branch={}/{}", hdfs_prefix, sftp_topic, name.unwrap(), sftp_dt_part) ;
            let hdfs_path = format!("{}/{}", hdfs_dir, sftp_file_name.replace(".zstd.b64", "").replace(":", "_") ) ;
            println!("append webhdfs: {}", hdfs_path) ;

            if info.is_some() {
                mk_hdfs_dir(&webhdfs, &hdfs_dir).await.expect("mk_hdfs_dir failed!") ;
                create_hdfs_file(&webhdfs, &hdfs_path, xs).await.expect("create_hdfs_file failed!") ;
            } else {
                append_hdfs_file(&webhdfs, &hdfs_path, xs).await.expect("append_hdfs_file failed!")  ;
            }
    }

    Ok(0)
}
*/

/// Stream one buffered file from sftp into HDFS.
///
/// Per line of the source file: base64-decode -> zstd-decompress -> keep only
/// datums that decode under `schema` -> zstd-recompress -> base64-re-encode.
/// Lines are then grouped into batches of up to ~200 MiB and each batch is
/// uploaded to `<hdfs_prefix>/<topic>/<dt-part>/<file>` via WebHDFS, with
/// ":" in the file name replaced by "_".
///
/// NOTE(review): the `if true` below makes the append branch unreachable —
/// every batch is written with CREATE. For a source file that produces more
/// than one batch this presumably overwrites earlier batches; confirm intent.
/// NOTE(review): `splitn(3, "/")` assumes paths are exactly
/// `<topic>/<dt-part>/<file>`; anything else panics on `collect_tuple`.
async fn sync_sftp_hdfs(schema: &Schema, subpath: &str, sftp_client: &Sftp, sftp_prefix: &str, webhdfs: &HttpSelector, hdfs_prefix: &str) -> io::Result<u64> {
    let sftp_path_s = format!("{}/{}", sftp_prefix, subpath) ;
    let sftp_path = Path::new(&sftp_path_s) ;
    let sftp_file = sftp_client.open(&sftp_path)? ;

    // Upper bound on the accumulated (re-encoded) size of one upload batch.
    let batch_size = 1024 * 1024 * 200 ;

    // println!("sftp_file: {:?}", sftp_path) ;

    stream::iter(BufReader::with_capacity(1024 * 1024, sftp_file).lines()
        .filter_map(|x| x.ok() )
        .filter_map(|x| base64::decode(x).ok() )
        .filter_map(|x| zstd::decode_all(x.as_slice()).ok() )
        // Drop lines whose payload does not decode under the expected schema.
        .filter(|x| from_avro_datum(&schema, &mut x.as_slice(), None).is_ok() )
        .filter_map(|x| zstd::encode_all(&x[..], 0).ok() ) 
        .map(|x| format!("{}\n", base64::encode(x)))
        // Greedily fill a batch; peek ahead so a line that would overflow the
        // budget starts the next batch instead.
        .peekable().batching(move |it| {
            let mut size  = 0 ;
            let mut buf = Vec::new() ;
            while let Some(val) = it.next() {
                size += val.len() ;
                buf.push(val) ;
                
                match it.peek() {
                    None => return Some(buf),
                    Some(val) => {
                        if val.len() + size > batch_size {
                            return Some(buf)
                        }
                    }
                }
            } ;
            None
        }))
        .for_each(|buf| async move {
            use itertools::Itertools;
            let (sftp_topic, sftp_dt_part, sftp_file_name) = subpath.splitn(3, "/").collect_tuple().unwrap() ;
            let hdfs_dir = format!( "{}/{}/{}", hdfs_prefix, sftp_topic, sftp_dt_part) ;
            let hdfs_path = format!("{}/{}", hdfs_dir, sftp_file_name.replace(":", "_") ) ;
            println!("append webhdfs: {}", hdfs_path) ;

            let xs = buf.concat().as_bytes().to_vec() ;
            // NOTE(review): append path is dead code while this stays `if true`.
            if true {
                mk_hdfs_dir(&webhdfs, &hdfs_dir).await.expect("mk_hdfs_dir failed!") ;
                create_hdfs_file(&webhdfs, &hdfs_path, xs).await.expect("create_hdfs_file failed!") ;
            } else {
                append_hdfs_file(&webhdfs, &hdfs_path, xs).await.expect("append_hdfs_file failed!")  ;
            }

        }).await ;

    Ok(0)
}

// Open an sftp channel to `host`: TCP connect, SSH handshake, then
// password authentication. Panics with a stage-specific message on failure.
pub async fn sftp(host: &SocketAddr, username: &str, password: &str) -> Sftp{
    let stream = TcpStream::connect(host).expect("ssh  connect error!");

    let mut session = Session::new().unwrap();
    session.set_tcp_stream(stream);
    session.handshake().expect("ssh handshake failed!");
    session.userauth_password(username, password).expect("ssh auth failed!");
    assert!(session.authenticated());

    session.sftp().expect("acquire sftp failed!")
}

mod models {
    /// One buffered sftp file, as parsed from its path by
    /// `utils::parse_sync_sftp` (see the regex there for the path layout:
    /// `<bucket>_<path>/dt=<yyyymmdd>/<host>_<hhmm>...`).
    #[derive(Debug)]
    pub struct SyncFileBufferSftp {
        // First "_"-separated component of the top-level directory name.
        pub bucket: String,
        // Remainder of the top-level directory name after the bucket.
        pub path: String,
        // Producing host, from the file-name prefix.
        pub host: String,
        // Partition date (yyyymmdd) from the "dt=" directory.
        pub dt: String,
        // Time-of-day (hhmm) component of the file name.
        pub tm: String,
        // The full original path the struct was parsed from.
        pub uri: String,
        // NOTE(review): the remaining fields are always filled with defaults
        // by parse_sync_sftp — presumably reserved for future bookkeeping.
        pub start_ts: String,
        pub end_ts: String,
        pub num: u8,
        pub size: u8,
        pub hdfs_host: String,
        pub hdfs_splits: String
    }
    
}
mod utils {
    use super::* ;
    use chrono::prelude::*;
    use regex::Regex;
    /// Return the first address in the comma-separated `hosts` list that is
    /// bound to one of this machine's network interfaces, if any.
    pub fn resolve_socket_addr(hosts: &str) -> Option<SocketAddr> {
        let local_ips: HashSet<_> = get_if_addrs::get_if_addrs().unwrap().iter()
            .map(|interface| interface.ip())
            .collect();
        hosts.split(",")
            .map(|h| h.replace("http://", ""))
            .filter_map(|h| h.parse::<SocketAddr>().ok())
            .find(|addr| local_ips.contains(&addr.ip()))
    }
    /// Parse a buffered file path of the form
    /// `<bucket>_<path>/dt=<yyyymmdd>/<host>_<hhmm>...` into its components.
    /// Returns None when the path does not match.
    /// NOTE(review): the regex is recompiled on every call — called per file
    /// inside the polling loop; hoisting it would avoid that cost.
    pub fn parse_sync_sftp (f: &str) -> Option<models::SyncFileBufferSftp> {
        let re = Regex::new(r"(?x)
           (?P<bucket>[^_]*)_(?P<path>\w+)
           /dt=(?P<dt>\d{8})
           /(?P<host>\S+)_(?P<tm>\d{4})").unwrap();

        let caps = re.captures(f)?;
        let grab = |name: &str| -> String { caps[name].to_string() };
        Some(models::SyncFileBufferSftp {
            bucket: grab("bucket"),
            path: grab("path"),
            host: grab("host"),
            dt: grab("dt"),
            tm: grab("tm"),
            uri: f.to_string(),
            start_ts: String::new(),
            end_ts: String::new(),
            num: 0,
            size: 0,
            hdfs_host: String::new(),
            hdfs_splits: String::new(),
        })
    }
    /// Ordering key (dt, tm) used to compare sync progress between files.
    pub fn sync_offset(sync: &models::SyncFileBufferSftp) -> (String, String) {
        (sync.dt.clone(), sync.tm.clone())
    }

    /// Today's date in China Standard Time (UTC+8), formatted as yyyymmdd.
    pub fn now_str() -> String {
        let cst = FixedOffset::east(8 * 3600);
        Utc::now().with_timezone(&cst).date().format("%Y%m%d").to_string()
    }

}

/// Entry point: on the matching host, restore the last synced (dt, tm)
/// offset from what is already in HDFS, then poll the sftp buffer forever,
/// pushing every file newer than the offset into HDFS and advancing it.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();

    dotenv().ok() ;
    // Guard: this binary may be deployed on several hosts; only the one whose
    // local interface matches EXCHANGE_BUFFER_SFTP_HOSTS does any work.
    let env_hosts = var("EXCHANGE_BUFFER_SFTP_HOSTS").unwrap_or("127.0.0.1:0".to_string()) ;
    if resolve_socket_addr(&env_hosts).is_none() {
        println!("EXCHANGE_BUFFER_SFTP_HOST not match! skip") ;
        return Ok(()) ;
    }

    // Source (sftp) and sink (webhdfs) connection settings from the environment.
    let in_sftps = var("EXCHANGE_BUFFER_SFTP_IN_SFTPS").unwrap_or("http://localhost:3029".to_string()) ;
    let in_sftp_username = var("EXCHANGE_BUFFER_SFTP_IN_SFTP_USERNAME").expect("no sftp username found!") ;
    let in_sftp_password = var("EXCHANGE_BUFFER_SFTP_IN_SFTP_PASSWORD").expect("no sftp password found!") ;
    let in_sftp_prefix = &var("EXCHANGE_BUFFER_SFTP_IN_SFTP_PREFIX").unwrap_or("/data/data_engine/data_buffer_dev.db".to_string()) ;

    let out_hdfss = var("EXCHANGE_BUFFER_SFTP_OUT_HDFSS").unwrap_or("http://localhost:50070".to_string()) ;
    let out_hdfs_prefix = &var("EXCHANGE_BUFFER_SFTP_OUT_HDFS_PREFIX").unwrap_or("/user/hive/warehouse/data_exchange_dev.db".to_string()) ;


    // Load the embedded Avro schema used to validate every datum.
    let schema_bs = AvscAsset::get("dc_sdk_dev.avsc").expect("dc_sdk_dev.avsc not found!") ;
    let schema_s = str::from_utf8(&schema_bs).expect("from_utf8 error!") ;
    let schema = &Schema::parse_str(&schema_s).expect("avsc schema error!") ;


    println!("connect to sftp...") ;
    // The env var carries an http-style address; sftp always talks on port 22.
    let mut one_sftp : SocketAddr = in_sftps.split(",").next().unwrap().replace("http://", "").parse().expect("invalid sftp host address!") ;
    one_sftp.set_port(22) ;
    let sftp_client = &sftp(&one_sftp, &in_sftp_username, &in_sftp_password).await ;

    let webhdfs_hosts = out_hdfss.split(",").collect() ;
    let webhdfs = &HttpSelector::create(webhdfs_hosts, "/webhdfs/v1") ;

    println!("restore status from hdfs...") ;

    // Scan existing HDFS dt= partitions (newest first, depth 1) to recover the
    // highest (dt, tm) already written, so restarts don't re-upload old files.
    let subject_prefix = format!("{}/{}",  &out_hdfs_prefix, "dc_sdk_dev") ;
    let hdfs_files_set = walk_hdfs_dir(&webhdfs, &subject_prefix, Some(1)).await.expect("walk_hdfs_dir failed!") ;
    let mut hdfs_files = hdfs_files_set.iter().collect::<Vec<_>>() ;
    hdfs_files.sort_by( |a, b| b.cmp(a) ) ;

    println!("hdfs_files: {:?}", hdfs_files) ;

    let mut buffer_offset : Option<(String, String)> = None ;
    // Only consider partitions up to today; "dt=YYYYMMDD" compares correctly
    // as a plain string.
    for file_dt_path in  hdfs_files.iter().filter(|x| x.to_string()  <= format!("dt={}", now_str())) {
        let subject_dt_path = format!("{}/{}", &subject_prefix, &file_dt_path) ;
        let avro_files = walk_hdfs_dir(&webhdfs, &subject_dt_path, None).await.expect("walk_hdfs_dir failed!") ;

        let dt = file_dt_path.split("=").last().unwrap() ;

        // Once an offset is fixed for some dt, older partitions are irrelevant.
        if let Some((offset_dt, _)) = &buffer_offset {
            if offset_dt != dt {
                break ;
            }
        }
        if ! avro_files.is_empty() {
            // The latest tm in this partition is encoded in the file-name suffix.
            let tm_file = avro_files.iter().max().unwrap() ;
            let file_tm = tm_file.split("_").last().unwrap().replace(".avro.zstd.b64", "") ;
            let new_buffer_offset = Some((dt.to_string(), file_tm.to_string())) ;

            if new_buffer_offset > buffer_offset {
                buffer_offset = new_buffer_offset ;
            }
        }
    }

    println!("init buffer_offset:{:?}", buffer_offset) ;


    // Poll loop: sync every new .b64 file past the current offset, then
    // advance the offset to the highest (dt, tm) successfully synced.
    loop {
        let sftp_files = walk_sftp_dir(&buffer_offset, &sftp_client, &in_sftp_prefix).await ;

        let max_offset = stream::iter(sftp_files.iter())
            .filter(|x| future::ready(x.ends_with(".b64")))
            .filter(|x| future::ready(
                    match &buffer_offset {
                        Some(offset_st) => {
                            sync_offset(&parse_sync_sftp(x).unwrap()) > *offset_st
                        },
                        None => true
                    }
            ))
            .inspect(|f| println!("new file: {}", f))
            .filter_map(|f| async move {
                // Files that fail to sync are dropped from the offset
                // computation and retried on the next poll.
                let sync_ret = sync_sftp_hdfs(schema, f, sftp_client, in_sftp_prefix, webhdfs, out_hdfs_prefix).await ;
                match sync_ret {
                    Ok(_) => Some(f),
                    Err(_) => None
                }
            })
            .map(|x| sync_offset(&parse_sync_sftp(x).unwrap()) )
            .collect::<Vec<_>>().await.into_iter().max();

        buffer_offset = max_offset.clone().or(buffer_offset) ;

        if max_offset.is_some() {
            println!("buffer_offset:{:?}", buffer_offset) ;
        }

        // Fix: std::thread::sleep would block a tokio worker thread for the
        // whole interval; yield to the runtime instead. (Requires tokio's
        // "time" feature — confirm it is enabled in Cargo.toml.)
        tokio::time::sleep(Duration::from_secs(5)).await ;
    }

}

mod test {
    // Placeholder module: no tests yet; the line below is a kept example invocation.
    // sync_sftp_hdfs(&schema_str, &sftp, Path::new("larluo.avro.zstd.b64"), &hdfs_client, Path::new("/larluo")).await ;
}

