use crate::imp::tonicrpc::kcptunnel::kcp::tunnel_id_str_to_conv_u32;
use crate::imp::tonicrpc::kcptunnel::kcp::IAsyncKcpTunnel;

use ekho::kcp::{Config, ControlBlock, Error};
use futures_util::{AsyncReadExt, AsyncWriteExt};
use tokio::{sync::watch::Receiver, task::JoinHandle};

use std::cmp::max;
use std::collections::HashMap;
use std::time::SystemTime;
use std::{pin::Pin, sync::Arc};

use futures_ringbuf::*;
use tokio::time::{interval, Duration};
use tokio_util::compat::Compat;
use tokio_util::compat::FuturesAsyncReadCompatExt;

lazy_static! {
    /// Global map: tunnel id -> current control-flow tier (0..=3).
    /// Written by `calcute_control_flow`, read by the `is_steam_backpressure_*`
    /// helpers; a tunnel's entry is removed when its updater task exits.
    static ref CONTROL_FLOW_MAP: std::sync::RwLock<HashMap<String, i32>> =
        std::sync::RwLock::new(HashMap::new());
}

/// Reads or mutates the global tunnel-id → control-flow-tier map.
///
/// * `is_set == true`: inserts `flow_type` for `tunnel_id`, or removes the
///   entry when `flow_type` is `None`; returns the previous value.
/// * `is_set == false`: returns the current value (`flow_type` is ignored).
///
/// Uses the non-blocking `try_read`/`try_write` lock variants, so `Err` is
/// returned when the lock is contended (or poisoned); callers treat this
/// as "tier unknown".
fn get_or_set_control_flow_map(
    is_set: bool,
    tunnel_id: String,
    flow_type: Option<i32>,
) -> Result<Option<i32>, anyhow::Error> {
    let res = if is_set {
        let mut map = CONTROL_FLOW_MAP
            .try_write()
            .map_err(|e| anyhow::anyhow!("CONTROL_FLOW_MAP.try_write() err: {:?} ", e))?;

        match flow_type {
            // `tunnel_id` is already owned; no `.to_owned()` clone needed.
            Some(flow) => map.insert(tunnel_id, flow),
            None => map.remove(&tunnel_id),
        }
    } else {
        CONTROL_FLOW_MAP
            .try_read()
            .map_err(|e| anyhow::anyhow!("map.try_read() err: {:?} ", e))?
            .get(&tunnel_id)
            .copied()
    };

    Ok(res)
}
// control flow
// see https://github.com/skywind3000/kcp/wiki/Flow-Control-for-Users
/// Classifies the tunnel's current send backpressure into one of four tiers
/// and records it in `CONTROL_FLOW_MAP` (only when the tier changed):
///   0: wait_send <= send_wnd / 2             (normal)
///   1: send_wnd / 2 < wait_send < send_wnd   (high)
///   2: send_wnd <= wait_send < 2 * send_wnd  (heavy)
///   3: wait_send >= 2 * send_wnd             (blocked)
pub fn calcute_control_flow(tunnel_id: String, wait_send: u16, send_wnd: u16) {
    // Widen to u32 so `send_wnd * 2` cannot overflow u16.
    let wait_send = wait_send as u32;
    let send_wnd = send_wnd as u32;

    // Previously recorded tier; -1 when unknown or the lookup failed.
    let read_flow = get_or_set_control_flow_map(false, tunnel_id.clone(), None)
        .ok()
        .flatten()
        .unwrap_or(-1);

    let control_flow = if wait_send <= send_wnd / 2 {
        0
    } else if wait_send < send_wnd {
        // BUGFIX: the original condition here was
        // `send_wnd / 2 < wait_send && wait_send < send_wnd / 2`,
        // which is always false, so tier 1 was never reported.
        1
    } else if wait_send < send_wnd * 2 {
        2
    } else {
        3
    };

    // Write only on change to avoid needless lock traffic.
    if read_flow != control_flow {
        if let Err(err) = get_or_set_control_flow_map(true, tunnel_id, Some(control_flow)) {
            log::error!("calcute_control_flow error:{:?}", err);
        }
    }
}

// Tier 0: wait window in [0, 1/2 * send window].
/// Returns `true` when the tunnel's recorded backpressure tier is 0.
/// An unknown tunnel or a contended map lookup is treated as normal,
/// matching the optimistic default of the original implementation.
pub fn is_steam_backpressure_normal(tunnel_id: String) -> bool {
    match get_or_set_control_flow_map(false, tunnel_id, None) {
        Ok(Some(flow)) => flow == 0,
        // No entry yet, or the lock was busy: assume normal.
        _ => true,
    }
}

// Tier >= 1: wait window beyond 1/2 * send window.
/// Returns `true` when the tunnel's recorded backpressure tier is above 0.
/// Unknown tunnels and failed lookups report `false`.
pub fn is_steam_backpressure_high(tunnel_id: String) -> bool {
    matches!(
        get_or_set_control_flow_map(false, tunnel_id, None),
        Ok(Some(flow)) if flow > 0
    )
}

// Tier >= 2: wait window at or beyond the send window.
/// Returns `true` when the tunnel's recorded backpressure tier is above 1.
/// Unknown tunnels and failed lookups report `false`.
pub fn is_steam_backpressure_heavy(tunnel_id: String) -> bool {
    matches!(
        get_or_set_control_flow_map(false, tunnel_id, None),
        Ok(Some(flow)) if flow > 1
    )
}

// Tier 3: wait window beyond twice the send window.
/// Returns `true` when the tunnel's recorded backpressure tier is above 2.
/// Unknown tunnels and failed lookups report `false`.
pub fn is_steam_backpressure_block(tunnel_id: String) -> bool {
    matches!(
        get_or_set_control_flow_map(false, tunnel_id, None),
        Ok(Some(flow)) if flow > 2
    )
}

// kcp imp implementation this async trait
// webrtc peer conn to kcp
/// Adapter trait: builds an async KCP tunnel on top of a WebRTC connection.
#[async_trait::async_trait]
pub trait WebRtcConnToAsyncKcpTunnel {
    /// Wraps `conn` into a boxed [`IAsyncKcpTunnel`].
    /// NOTE(review): "asnyc" is a typo for "async", but renaming would break
    /// implementors and callers.
    async fn to_asnyc_kcp_tunnel(
        // peer conn (WebRTC); raw KCP segments travel over it
        conn: Arc<dyn webrtc_util::Conn + Send + Sync>,
        // cancel sender, signalled when the tunnel's background task exits
        cancel_tx: tokio::sync::mpsc::Sender<()>,
        // tunnel id (expected to parse into the KCP conv u32)
        tunnel_id: &str,
        // ekho config
        ekho_config: Config,
    ) -> Result<Box<dyn IAsyncKcpTunnel>, anyhow::Error>;
}

// pub static KCP_RING_BUFFER_SIZE: usize = 4096; //259_200; //1920*1080
/**
 * Powerful / Efficient KCP implementation
 * 1. `tokio::select!` handles the KCP interval flush, output and receive
 * 2. ring buffers bridge the tokio async I/O (tonic rpc) with KCP
 * 3. low memory & CPU usage
*/
/// A session, built on top of KCP
#[pin_project::pin_project]
pub struct EkhoKcpImp {
    // KCP conversation id (derived from the tunnel id string)
    conv: u32,
    // peer connection (WebRTC); kept so the tunnel owns its transport
    peer: Arc<dyn webrtc_util::Conn + Send + Sync>,
    // handle of the spawned background task driving the KCP state machine
    runner: JoinHandle<()>,
    // write end of the send ring buffer; `AsyncWrite` delegates here
    rb_send_tx: Compat<Endpoint>,
    // read end of the receive ring buffer; `AsyncRead` delegates here
    rb_receive_rx: Compat<Endpoint>,
}

#[async_trait::async_trait]
impl WebRtcConnToAsyncKcpTunnel for EkhoKcpImp {
    /// Builds an [`EkhoKcpImp`] over the given WebRTC connection, deriving
    /// the KCP conversation id from the tunnel id string.
    async fn to_asnyc_kcp_tunnel(
        conn: Arc<dyn webrtc_util::Conn + Send + Sync>,
        cancel_tx: tokio::sync::mpsc::Sender<()>,
        tunnel_id: &str,
        ekho_config: Config,
    ) -> Result<Box<dyn IAsyncKcpTunnel>, anyhow::Error> {
        let conv = tunnel_id_str_to_conv_u32(tunnel_id)?;
        log::info!("to_kcp_tonic conv:{}", conv);
        let tunnel = EkhoKcpImp::new(conn, cancel_tx, conv, tunnel_id.to_owned(), ekho_config);
        Ok(Box::new(tunnel))
    }
}

// Marker impl: EkhoKcpImp qualifies as a tunnel through its
// AsyncRead/AsyncWrite implementations below.
impl IAsyncKcpTunnel for EkhoKcpImp {}

impl tokio::io::AsyncWrite for EkhoKcpImp {
    /// Forwards writes into the send ring buffer; the background updater
    /// task drains that buffer into the KCP control block.
    fn poll_write(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> std::task::Poll<Result<usize, std::io::Error>> {
        if buf.is_empty() {
            log::warn!("poll_write buf.is_empty()");
        }
        Pin::new(self.project().rb_send_tx).poll_write(cx, buf)
    }

    /// Flushes the send ring buffer endpoint.
    fn poll_flush(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), std::io::Error>> {
        Pin::new(self.project().rb_send_tx).poll_flush(cx)
    }

    /// Shuts down the send ring buffer endpoint.
    fn poll_shutdown(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), std::io::Error>> {
        Pin::new(self.project().rb_send_tx).poll_shutdown(cx)
    }
}

impl tokio::io::AsyncRead for EkhoKcpImp {
    /// Forwards reads to the receive ring buffer, which the background task
    /// fills with payloads recovered by KCP.
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        Pin::new(self.project().rb_receive_rx).poll_read(cx, buf)
    }
}

// read from webrtc ice conn
/// Receives one datagram from the WebRTC connection.
///
/// Allocates a `buffer_size` buffer and truncates it to the bytes actually
/// received. (The original copied the received prefix into a second
/// allocation with `to_vec`; truncating in place returns the same bytes
/// without the extra copy.)
async fn peer_receive_fn(
    peer_receive: &Arc<dyn webrtc_util::Conn + Send + Sync>,
    buffer_size: usize,
) -> Result<Vec<u8>, anyhow::Error> {
    let mut peer_receive_buffer = vec![0u8; buffer_size];
    let size = peer_receive.recv(&mut peer_receive_buffer).await?;
    peer_receive_buffer.truncate(size);
    Ok(peer_receive_buffer)
}

/// Waits until the updater task signals that KCP can accept more data, then
/// reads one chunk from the send ring buffer into `send_rx_buffer`.
///
/// Returns the number of bytes read; errors propagate from the watch channel
/// (sender dropped) or the ring-buffer read.
async fn send_rx_read(
    can_read_rx: &mut Receiver<bool>,
    send_rx: &mut Endpoint,
    send_rx_buffer: &mut [u8],
) -> Result<usize, anyhow::Error> {
    // `bool` is Copy; deref instead of clone.
    let can_read = *can_read_rx.borrow_and_update();
    if !can_read {
        // Back-pressured: park until the updater flips the watch value.
        can_read_rx.changed().await?;
    }
    let size = send_rx.read(send_rx_buffer).await?;
    Ok(size)
}

impl EkhoKcpImp {
    /// Creates a new session given a peer kcptunnelimp and a conv.
    ///
    /// Spawns one background task (`runner`) that races four cooperating
    /// futures with `tokio::select!`:
    ///   * `updater`          - owns the KCP `ControlBlock`: interval flush,
    ///                          peer input, app sends, output/recv draining
    ///   * `send_endpoint`    - pulls application writes from the send ring
    ///                          buffer, gated by a watch-based back-pressure
    ///   * `sender`           - ships KCP output packets over the WebRTC conn
    ///   * `receive_endpoint` - writes KCP-delivered payloads into the
    ///                          receive ring buffer consumed by `AsyncRead`
    /// When any of the four exits, `cancel_tx` is signalled.
    pub fn new(
        peer: Arc<dyn webrtc_util::Conn + Send + Sync>,
        cancel_tx: tokio::sync::mpsc::Sender<()>,
        conv: u32,
        tunnel_id: String,
        config: Config,
    ) -> Self {
        let mut control = ControlBlock::new(conv, config.clone());

        log::info!("ekho_kcp config: {:?}", &config);
        let rec_window = config.recv_wnd as usize;
        let send_window = config.send_wnd as usize;
        // config.interval = 10;
        // Ring-buffer capacity derived from the KCP windows; a larger
        // Endpoint is pointless because the windows already cap in-flight
        // data.
        // NOTE(review): the original (Chinese) comment said "sum of the two
        // windows" but the code takes the max — confirm which was intended.
        let buffer_size: usize = max(send_window, rec_window);
        let peer_send = peer.clone();
        // ring buffer for application -> KCP sends
        let (send_tx, mut send_rx) = Endpoint::pair(buffer_size, buffer_size);
        // ring buffer for KCP -> application receives
        let (mut receive_tx, receive_rx) = Endpoint::pair(buffer_size, buffer_size);
        let send_tx = send_tx.compat();
        let receive_rx = receive_rx.compat();

        // send_endpoint task -> updater: chunks to feed into control.send()
        let (send_endpoint_tx, mut send_endpoint_rx) =
            tokio::sync::mpsc::unbounded_channel::<Vec<u8>>();
        // updater -> sender task: raw KCP output packets for the wire
        let (output_tx, mut output_rx) = tokio::sync::mpsc::unbounded_channel::<Vec<u8>>();
        // updater -> receive_endpoint task: payloads delivered by KCP
        let (receive_endpoint_tx, mut receive_endpoint_rx) =
            tokio::sync::mpsc::unbounded_channel::<Vec<u8>>();
        let peer_receive = peer.clone();
        // back-pressure flag: true while KCP's wait queue fits the send window
        let (can_read_tx, mut can_read_rx) =
            tokio::sync::watch::channel(control.wait_send() <= control.config().send_wnd as usize);
        // self-notification: true means "drain output/recv on next loop turn"
        let (check_output_tx, mut check_output_rx) = tokio::sync::watch::channel(false);

        // `updater` is the only future that touches the KCP control block;
        // everything else communicates with it through the channels above.
        let updater = async move {
            let mut interval_tick = interval(Duration::from_millis(config.interval as u64));
            // let mut interval_sleep = interval(Duration::from_millis(100));
            // let mut peer_receive_buffer = vec![0u8; buffer_size];
            // let mut send_rx_buffer = vec![0u8; buffer_size];
            let mut send_packet_num: u64 = 0;
            let mut peer_receive_fut = peer_receive_fn(&peer_receive, buffer_size);
            // SAFETY: `peer_receive_fut` lives on this task's stack for the
            // whole loop and is re-pinned immediately after every
            // reassignment; it is never moved while a pin to it is live.
            // NOTE(review): `tokio::pin!` / `Box::pin` would avoid the unsafe.
            let mut peer_receive_pin = unsafe { Pin::new_unchecked(&mut peer_receive_fut) };
            let mut need_send_check;
            loop {
                // every select branch need send check.
                need_send_check = true;
                tokio::select! {
                    // timeout interval
                    _ = interval_tick.tick() => {
                        // log::info!("tokio::select! interval_tick.tick()");
                        control.flush();
                        if let Err(e) =  check_output_tx.send(false) {
                            log::error!("check_output_tx.send(false) err: {:?}", &e);
                        }
                    },

                    // every time branch out and loop again
                    _ = check_output_rx.changed() => {
                        need_send_check = false;
                        if check_output_rx.borrow_and_update().clone() {
                            if let Err(e) =  check_output_tx.send(false) {
                                log::error!("check_output_tx.send(false) err: {:?}", &e);
                            }
                            // read ouput from kcp
                            while let Some(raw) = control.output() {
                                // log::info!("control.output() while");
                                // dissect_headers_from_raw(&raw, "send");
                                if raw.is_empty() {
                                    continue;
                                }
                                // send to channel
                                match output_tx.send(raw) {
                                    //send to remote
                                    Ok(_) => {
                                        // log::debug!("conn.send size {}", size);
                                    }
                                    Err(e) => {
                                        log::error!("updater output_tx.send: {:?}", &e);
                                    }
                                }
                            }
                            loop {
                                // then read recv from kcp
                                match control.recv() {
                                    //received data
                                    Ok(data) => {
                                        if data.is_empty() {
                                            log::info!("control.recv() data.is_empty() break");
                                            break;
                                        }
                                        // quick send to channel
                                        match receive_endpoint_tx.send(data) {
                                            Ok(_) => {
                                                // log::debug!("conn.send size {}", size);
                                            }
                                            Err(e) => {
                                                log::error!("updater receive_endpoint_tx.send err {:?}", &e);
                                            }
                                        }
                                    }
                                    Err(Error::NotAvailable) => {
                                        // NotAvailable just means "nothing queued" — not an error.
                                        break;
                                    }
                                    Err(err) => {
                                        log::error!("receive_endpoint  kcp.recv() err: {:?}", &err);
                                        break;
                                    }
                                }
                            }
                        }
                    },

                    // read from webrtc peer,must input to kcp
                    res = &mut peer_receive_pin => {
                        //  if there ar any buffer,then send to channel and quit branch.
                        peer_receive_fut = peer_receive_fn(&peer_receive, buffer_size);
                        // SAFETY: same invariant as above — the freshly
                        // assigned stack future is re-pinned before it can be
                        // polled again and is not moved afterwards.
                        peer_receive_pin = unsafe { Pin::new_unchecked(&mut peer_receive_fut) };
                        match res {
                            Ok(data) => {
                                let size = data.len();
                                if size > 0 {
                                    let mut send_size = 0;
                                    while send_size < size {
                                        // input to kcp
                                        match control.input(&data[send_size..size]) {
                                            Ok(s) => {
                                                if s == 0 {
                                                    log::info!("control.input size 0 break");
                                                    break;
                                                } else {
                                                    // log::info!("control.input size: {:?}", s);
                                                }
                                                send_size = send_size + s;
                                            }
                                            Err(e) => {
                                                log::error!("peer_receive_pin control.input err: {:?}", &e);
                                            }
                                        }
                                    }
                                } else {
                                    log::info!(
                                        "peer_receive.recv size == 0"
                                    );
                                }

                            },
                            Err(e) => {
                                log::error!("peer_receive.recv err: {:?}", &e);
                                break;
                            }
                        }
                        // log::info!("tokio::select! peer_receive.recv end");
                    },

                    //  read from sender ring buffer,if there are any buffer then call control.send
                    res = send_endpoint_rx.recv() => {
                        // log::info!("tokio::select! send_rx_read");
                        match res {
                            // Ok(size) => {
                            Some(data) => {
                                let size = data.len();
                                if size > 0 {
                                    let mut send_size = 0;
                                    while send_size < size {
                                        // match control.send(&send_rx_buffer[send_size..size]) {
                                        match control.send(&data[send_size..size]) {
                                            Ok(s) => {
                                                if s == 0 {
                                                    log::warn!("control.send size 0 break!");
                                                    break;
                                                }
                                                send_size = send_size + s;
                                            }
                                            Err(e) => {
                                                log::error!("send_endpoint_rx.recv control.send err: {:?}", &e);
                                            }
                                        }
                                    }
                                    // Periodic throughput logging at
                                    // escalating verbosity.
                                    send_packet_num = send_packet_num + 1;
                                    if send_packet_num % 1000 == 0 {
                                        log::info!(" send_packet_num %1000 {}", send_packet_num);
                                    } else if send_packet_num % 100 == 0 {
                                        log::debug!("send_endpoint send_packet_num %100 {}", send_packet_num);
                                    } else if send_packet_num % 10 == 0 {
                                        log::trace!(
                                            "send_endpoint send_packet_num %10 {} send_size:{} ",
                                            send_packet_num,
                                            size
                                        );
                                    }
                                } else {
                                    log::warn!("send_rx.read size 0, closed");
                                    break;
                                }
                            }
                            None => {
                                log::error!("send_endpoint_rx.recv channel closed");
                            }
                        }
                    },
                }

                // Flow-control bookkeeping after every branch.
                // https://github.com/skywind3000/kcp/wiki/Flow-Control-for-Users
                let wait_send = control.wait_send();
                let send_wnd = control.config().send_wnd;

                if let Err(e) = can_read_tx.send(wait_send <= send_wnd as usize) {
                    log::error!("can_read_tx.send err: {:?}", e);
                }
                if need_send_check {
                    // NOTE(review): the message below says `false` but this
                    // branch sends `true`.
                    if let Err(e) = check_output_tx.send(true) {
                        log::error!("check_output_tx.send(false) err: {:?}", &e);
                    }
                }
                // let peer_closing = peer_closing_cloned.load(Ordering::SeqCst);
                // let local_closing = local_closing_cloned.load(Ordering::SeqCst);
                if control.dead_link() {
                    // NOTE(review): this inner check duplicates the enclosing
                    // condition and is therefore always true here.
                    if control.dead_link() {
                        log::error!("control dead link");
                    }
                    log::warn!("control.all_flushed()? break?");
                    break;
                }
                calcute_control_flow(tunnel_id.clone(), wait_send as u16, send_wnd);
            }
            // Drop this tunnel's flow-control entry now that the updater exits.
            let _ = get_or_set_control_flow_map(true, tunnel_id, None);

            // close peer because of control.dead_link()
            if let Err(err) = peer_receive.close().await {
                log::error!("control dead link close webrtc err:{:?}", err);
            }
            log::warn!("control kcp_ekho exit updater");
        };

        // read from send ring buffer,then select! branch will call send
        let send_endpoint = async move {
            let mut send_rx_buffer = vec![0u8; buffer_size];
            loop {
                match send_rx_read(&mut can_read_rx, &mut send_rx, &mut send_rx_buffer).await {
                    Ok(size) => {
                        if size > 0 {
                            if let Err(e) = send_endpoint_tx.send(send_rx_buffer[..size].to_vec()) {
                                log::error!("send_endpointt_tx.send err: {:?}", &e);
                            }
                        } else {
                            // Zero-length read: the ring buffer was closed.
                            log::error!("send_rx_read size == 0");
                            break;
                        }
                    }
                    Err(e) => {
                        log::error!("send_rx_read err: {:?}", &e);
                        break;
                    }
                }
            }

            log::warn!("kcp_ekho exit send_endpoint");
        };

        // read from select! branch output and then send by webrtc
        let sender = async move {
            while let Some(raw) = output_rx.recv().await {
                // log::info!("output_rx.recv() start");
                if raw.is_empty() {
                    log::trace!("output_rx.recv() data.is_empty()");
                    continue;
                }
                match peer_send.send(&raw).await {
                    Ok(size) => {
                        if size == 0 {
                            log::warn!("peer_send.send size == 0");
                        }
                    }
                    Err(e) => {
                        // Best-effort: a single failed send does not kill the
                        // tunnel; KCP retransmission covers the loss.
                        log::error!("conn.send err: {:?}", &e);
                        // break;
                    }
                }
            }

            log::warn!("kcp_ekho exit sender");
        };

        // read from select! branch kcp.received package,and write to ringbuffer.
        let receive_endpoint = async move {
            let mut packet_num: u64 = 0;
            let _each_time = SystemTime::now();
            while let Some(data) = receive_endpoint_rx.recv().await {
                // log::info!("receive_endpoint_rx.recv() start");
                if data.is_empty() {
                    log::trace!("receive_endpoint_rx.recv() data.is_empty()");
                    continue;
                }
                let now = SystemTime::now();
                match receive_tx.write_all(&data).await {
                    Ok(_) => {
                        // Log when the ring-buffer write stalls (> 1ms),
                        // which indicates a slow application reader.
                        let _now = SystemTime::now();
                        let diff_us = _now.duration_since(now).unwrap_or_default().as_micros();
                        // now = _now;
                        if diff_us > 1000 {
                            log::trace!(
                                "receive_endpoint receive_tx.write_all size:{}  use time: {}us",
                                data.len(),
                                diff_us
                            );
                        }
                        // Periodic throughput logging at escalating verbosity.
                        packet_num = packet_num + 1;
                        if packet_num % 1000 == 0 {
                            log::info!("receive_endpoint packet_num %1000 {}", packet_num);
                        } else if packet_num % 100 == 0 {
                            log::debug!("receive_endpoint packet_num %100 {}", packet_num);
                        } else if packet_num % 10 == 0 {
                            log::trace!(
                                "receive_endpoint packet_num %10 {} packet_size:{} ",
                                packet_num,
                                data.len()
                            );
                        }
                    }
                    Err(e) => {
                        log::error!("receive_tx.write_all err: {:?}", &e);
                    }
                }
            }

            log::warn!("kcp_ekho exit receive_endpoint");
        };

        // Race all four futures; the first to finish tears the tunnel down
        // and notifies the owner through `cancel_tx`.
        let runner = tokio::spawn(async move {
            tokio::select! {
                _  = updater => {},
                _  = send_endpoint => {},
                _  = sender => {},
                _  = receive_endpoint => {},
            }
            if let Err(e) = cancel_tx.send(()).await {
                log::error!("runner cancel_tx.send(()) err: {:?}", &e);
            } else {
                log::info!("runner end");
            }
        });

        EkhoKcpImp {
            conv,
            peer,
            runner,
            rb_send_tx: send_tx,
            rb_receive_rx: receive_rx,
        }
    }
}
