use std::{collections::VecDeque, io::IoSlice, time::Duration};

use crate::{
    config::http::core_conf::{
        BODY_BUFFER_MIN_WRITE_SIZE, MAX_CHUNKED_LINE_SIZE,
        MIN_BODY_WRITE_TIMEOUT_MS,
    },
    error::*,
    http::base::parse::{
        http_parse_chunked_size, http_read_crlf_line, parse_http_header,
    },
};
use bytes::{BufMut, Bytes, BytesMut};
use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    time::timeout,
};

use crate::{
    config::http::core_conf::{MIN_BODY_BUFFER_SIZE, MIN_BODY_READ_TIMEOUT_MS},
    http::base::{conn::BoxHttpConn, headers::HttpHeaders, parse::HttpResult},
    utils::fixed_buffer::FixedBuffer,
};

/// Parser position within a single chunk of a chunked-encoded body.
#[derive(Clone, Copy, Debug)]
pub enum ChunkedParseState {
    // Waiting for the chunk-size line ("<hex>\r\n").
    Start,
    // Reading the chunk payload itself.
    Data,
    // Expecting the CRLF that terminates the chunk payload.
    AfterDataCRLF,
}

/// Progress of the chunked-body parser.
#[derive(Clone, Copy, Debug)]
pub struct ChunkedReaderState {
    // Where the parser is within the current chunk.
    state: ChunkedParseState,
    // Declared size of the current chunk (taken from its size line).
    size: usize,
    // Bytes of the current chunk's payload still to be read.
    remain: usize,
    // True once the terminating zero-size chunk has been parsed.
    body_done: bool,
}

/// Transfer mode of the body being read, together with its progress.
#[allow(dead_code)]
#[derive(Debug)]
pub enum BodyReaderState {
    // The message carries no body at all.
    NoBody,
    ContentLength(usize), // remaining bytes still to read
    Chunked(ChunkedReaderState),
    UntilClosed(bool), // read done
}

/// Reads an HTTP message body from a connection according to its transfer
/// encoding: none, Content-Length, chunked, or read-until-close.
#[allow(dead_code)]
pub struct BodyReader {
    // Current transfer mode and read progress.
    state: BodyReaderState,
    // Buffer holding bytes read from the connection but not yet consumed.
    last_buf: FixedBuffer,
    // Capacity used whenever a replacement buffer is allocated.
    buf_capacity: usize,
    // Timeout applied to each individual read syscall.
    read_timeout_ms: usize,

    // for http trailers.
    // Number of trailer headers announced; 0 means none are expected.
    expect_trailers: usize,
    // Parsed trailer headers (only Some when expect_trailers > 0).
    trailers: Option<HttpHeaders>,
    // True once all expected trailers have been read.
    read_trailers_done: bool,
}

#[allow(dead_code)]
impl BodyReader {
    pub fn with_no_body(buf: Option<FixedBuffer>) -> Self {
        let last_buf = match buf {
            Some(b) => b,
            None => FixedBuffer::new(0),
        };

        BodyReader {
            state: BodyReaderState::NoBody,
            last_buf,
            buf_capacity: 0,
            read_timeout_ms: 0,
            expect_trailers: 0,
            trailers: None,
            read_trailers_done: false,
        }
    }

    pub fn with_cl_transfer(
        buf: Option<FixedBuffer>,
        cl: usize,
        mut buf_capacity: usize,
        mut read_timeout_ms: usize,
    ) -> Self {
        buf_capacity = buf_capacity.max(MIN_BODY_BUFFER_SIZE);
        read_timeout_ms = read_timeout_ms.max(MIN_BODY_READ_TIMEOUT_MS);

        if cl == 0 {
            buf_capacity = 0;
        }

        let last_buf = match buf {
            Some(b) => b,
            None => FixedBuffer::new(buf_capacity),
        };

        BodyReader {
            state: BodyReaderState::ContentLength(cl),
            last_buf,
            buf_capacity,
            read_timeout_ms,
            expect_trailers: 0,
            trailers: None,
            read_trailers_done: false,
        }
    }

    pub fn with_chunked_transfer(
        buf: Option<FixedBuffer>,
        mut buf_capacity: usize,
        mut read_timeout_ms: usize,
        expect_trailers: usize,
    ) -> Self {
        buf_capacity = buf_capacity.max(MIN_BODY_BUFFER_SIZE);
        read_timeout_ms = read_timeout_ms.max(MIN_BODY_READ_TIMEOUT_MS);

        let last_buf = match buf {
            Some(b) => b,
            None => FixedBuffer::new(buf_capacity),
        };

        let mut trailers = None;
        if expect_trailers > 0 {
            trailers = Some(HttpHeaders::new());
        }

        BodyReader {
            state: BodyReaderState::Chunked(ChunkedReaderState {
                state: ChunkedParseState::Start,
                size: 0,
                remain: 0,
                body_done: false,
            }),
            last_buf,
            buf_capacity,
            read_timeout_ms,
            expect_trailers,
            trailers,
            read_trailers_done: false,
        }
    }

    pub fn with_until_closed_transfer(
        buf: Option<FixedBuffer>,
        mut buf_capacity: usize,
        mut read_timeout_ms: usize,
    ) -> Self {
        buf_capacity = buf_capacity.max(MIN_BODY_BUFFER_SIZE);
        read_timeout_ms = read_timeout_ms.max(MIN_BODY_READ_TIMEOUT_MS);

        let last_buf = match buf {
            Some(b) => b,
            None => FixedBuffer::new(buf_capacity),
        };

        BodyReader {
            state: BodyReaderState::UntilClosed(false),
            last_buf,
            buf_capacity,
            read_timeout_ms,
            expect_trailers: 0,
            trailers: None,
            read_trailers_done: false,
        }
    }

    /// Drives one step of body reading according to the transfer mode,
    /// appending any produced data to `bufs`.
    pub async fn read_body(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
    ) -> Result<HttpResult> {
        match &self.state {
            BodyReaderState::ContentLength(_) => {
                self.do_content_length_read(hc, bufs).await
            }
            BodyReaderState::Chunked(_) => self.do_chunked_read(hc, bufs).await,
            BodyReaderState::UntilClosed(_) => {
                self.do_until_closed_read(hc, bufs).await
            }
            // Nothing to read for body-less messages.
            BodyReaderState::NoBody => Ok(HttpResult::Complete),
        }
    }

    // Reads a Content-Length delimited body, moving buffered bytes into
    // `bufs` and tracking the remaining byte count inside `self.state`.
    //
    // NOTE(review): the original comment claimed "at most one read syscall
    // per call", but the `read_num > 1` guard actually permits up to two
    // reads before returning Partial — confirm which is intended.
    async fn do_content_length_read(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
    ) -> Result<HttpResult> {
        let mut read_num = 0;

        loop {
            let remain = match self.state {
                BodyReaderState::ContentLength(remain) => remain,
                _ => {
                    unreachable!("it's a content-length reader");
                }
            };

            // Whole body already delivered.
            if remain == 0 {
                return Ok(HttpResult::Complete);
            }

            let last_buf = &mut self.last_buf;
            let read_slice_len = last_buf.read_slice_len();

            // Never hand out more than the declared Content-Length.
            let todo = remain.min(read_slice_len);

            // has data.
            if todo > 0 {
                let bytes_buf = last_buf.split_to(todo);

                bufs.push_back(bytes_buf);
                self.state = BodyReaderState::ContentLength(remain - todo);
                continue;
            }

            if read_num > 1 {
                return Ok(HttpResult::Partial);
            }

            read_num += 1;

            // The writable space left in the buffer is too small, so swap
            // in a fresh buffer. A larger writable area helps reduce the
            // number of read syscalls. (All buffered data was consumed
            // above, so nothing is lost by replacing the buffer.)
            let write_slice_len = last_buf.write_slice_len();
            if write_slice_len < BODY_BUFFER_MIN_WRITE_SIZE {
                self.last_buf = FixedBuffer::new(self.buf_capacity);
            }

            let last_buf = &mut self.last_buf;
            let write_slice = last_buf.write_slice();

            let read_event = hc.read(write_slice);
            let read_event = timeout(
                Duration::from_millis(self.read_timeout_ms as u64),
                read_event,
            );

            match read_event.await {
                Ok(res) => match res {
                    // EOF before the declared length arrived is an error.
                    Ok(0) => {
                        return Error::e_explain(
                            ErrorType::ReadEOF,
                            "read content-length body EOF",
                        );
                    }

                    Ok(n) => {
                        last_buf.fill(n);
                        continue;
                    }

                    Err(e) => {
                        return Err(Error::from_error(
                            ErrorType::ReadError,
                            "read content-length body error",
                            e,
                        ));
                    }
                },

                Err(_) => {
                    return Error::e_explain(
                        ErrorType::ReadTimedout,
                        "read content-length body timedout",
                    );
                }
            }
        }
    }

    /// Reads a chunked-encoded body, appending decoded chunk payloads to
    /// `bufs`. Drives a small state machine (size line -> data -> trailing
    /// CRLF) over the buffered bytes and issues at most two read syscalls
    /// per call before returning `Partial`. Once the terminating zero-size
    /// chunk is seen, hands off to `read_trailers`.
    async fn do_chunked_read(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
    ) -> Result<HttpResult> {
        let mut read_num = 0;

        loop {
            // Inner loop: consume as much of the buffered data as possible
            // before issuing another read.
            loop {
                // Re-fetch the chunked state on every iteration.
                let chunked_state = match &mut self.state {
                    BodyReaderState::Chunked(chunked_reader_state) => {
                        chunked_reader_state
                    }
                    _ => {
                        unreachable!("it's a chunked reader");
                    }
                };

                if chunked_state.body_done {
                    return self.read_trailers(hc).await;
                }

                let last_buf = &mut self.last_buf;
                let read_slice = last_buf.read_slice();

                match chunked_state.state {
                    ChunkedParseState::Start => {
                        match http_read_crlf_line(read_slice) {
                            // Got a complete chunked size line.
                            Some(size) => {
                                let chunked_size = http_parse_chunked_size(
                                    &read_slice[0..size],
                                )?;
                                last_buf.consume(size);
                                chunked_state.state = ChunkedParseState::Data;
                                chunked_state.size = chunked_size;
                                chunked_state.remain = chunked_size;
                                continue;
                            }

                            // Less than a full line buffered so far.
                            None => {
                                if read_slice.len() > MAX_CHUNKED_LINE_SIZE {
                                    return Error::e_explain(
                                        ErrorType::InvalidChunkedLine,
                                        "chunked size line is too large",
                                    );
                                }
                                // no more data.
                                break;
                            }
                        }
                    }

                    ChunkedParseState::Data => {
                        let mut remain = chunked_state.remain;
                        let todo = remain.min(read_slice.len());
                        if todo > 0 {
                            let bytes_buf = last_buf.split_to(todo);
                            bufs.push_back(bytes_buf);
                            remain -= todo;
                        }

                        // BUGFIX: always persist the updated remainder.
                        // Previously the decremented value was dropped on
                        // the partial-data path, so bytes already delivered
                        // were counted against the chunk again after the
                        // next read, corrupting the chunk framing.
                        chunked_state.remain = remain;

                        if remain == 0 {
                            chunked_state.state =
                                ChunkedParseState::AfterDataCRLF;
                            continue;
                        }

                        // no more data.
                        break;
                    }

                    ChunkedParseState::AfterDataCRLF => {
                        match http_read_crlf_line(read_slice) {
                            Some(res) => {
                                // The line after chunk data must be a bare
                                // CRLF (length 2).
                                if res != 2 {
                                    return Error::e_explain(
                                        ErrorType::InvalidChunkedLine,
                                        "invalid chunked after data crlf",
                                    );
                                }
                                last_buf.consume(res);

                                // A zero-size chunk marks the end of the
                                // body.
                                let body_done = chunked_state.size == 0;
                                chunked_state.state = ChunkedParseState::Start;
                                chunked_state.body_done = body_done;
                                continue;
                            }
                            None => {
                                if read_slice.len() >= 2 {
                                    return Error::e_explain(
                                        ErrorType::InvalidChunkedLine,
                                        "invalid chunked after data crlf",
                                    );
                                }

                                // no more data
                                break;
                            }
                        }
                    }
                }
            }

            // Allow up to two read syscalls per call, then yield Partial.
            if read_num > 1 {
                return Ok(HttpResult::Partial);
            }

            read_num += 1;

            // The writable space left in the buffer is too small; swap in a
            // fresh buffer. A larger writable area reduces the number of
            // read syscalls.
            let last_buf = &mut self.last_buf;
            let write_slice_len = last_buf.write_slice_len();
            if write_slice_len < BODY_BUFFER_MIN_WRITE_SIZE {
                let read_slice = self.last_buf.read_slice();

                let mut new_buf = FixedBuffer::new(self.buf_capacity);
                // Carry any residual bytes over into the new buffer.
                if !read_slice.is_empty() {
                    new_buf.put_slice(read_slice);
                }
                self.last_buf = new_buf;
            }

            let last_buf = &mut self.last_buf;
            let write_slice = last_buf.write_slice();
            let read_event = hc.read(write_slice);
            let read_event = timeout(
                Duration::from_millis(self.read_timeout_ms as u64),
                read_event,
            );

            match read_event.await {
                Ok(res) => match res {
                    Ok(0) => {
                        return Error::e_explain(
                            ErrorType::ReadEOF,
                            "read chunked body EOF",
                        );
                    }

                    Ok(n) => {
                        last_buf.fill(n);
                        continue;
                    }

                    Err(e) => {
                        return Err(Error::from_error(
                            ErrorType::ReadError,
                            "read chunked body error",
                            e,
                        ));
                    }
                },

                Err(_) => {
                    return Error::e_explain(
                        ErrorType::ReadTimedout,
                        "read chunked body timedout",
                    );
                }
            }
        }
    }

    /// Reads body data until the peer closes the connection. All buffered
    /// bytes are forwarded to `bufs`; EOF marks the body as complete.
    async fn do_until_closed_read(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
    ) -> Result<HttpResult> {
        let mut read_num = 0;

        loop {
            match self.state {
                BodyReaderState::UntilClosed(body_done) => {
                    if body_done {
                        return Ok(HttpResult::Complete);
                    }
                }
                _ => {
                    unreachable!("it's a until-closed reader");
                }
            }

            let last_buf = &mut self.last_buf;
            let read_slice_len = last_buf.read_slice_len();

            // Forward everything currently buffered.
            if read_slice_len > 0 {
                let bytes_buf = last_buf.split_to(read_slice_len);
                bufs.push_back(bytes_buf);
            }

            // Allow up to two read syscalls per call, then yield Partial.
            if read_num > 1 {
                return Ok(HttpResult::Partial);
            }

            read_num += 1;

            // The writable space left in the buffer is too small; swap in a
            // fresh buffer. A larger writable area reduces the number of
            // read syscalls. (Everything buffered was already forwarded, so
            // no residual data needs to be carried over.)
            let write_slice_len = last_buf.write_slice_len();
            if write_slice_len < BODY_BUFFER_MIN_WRITE_SIZE {
                self.last_buf = FixedBuffer::new(self.buf_capacity);
            }

            let last_buf = &mut self.last_buf;
            let write_slice = last_buf.write_slice();
            let read_event = hc.read(write_slice);
            let read_event = timeout(
                Duration::from_millis(self.read_timeout_ms as u64),
                read_event,
            );

            match read_event.await {
                Ok(res) => match res {
                    // For until-closed transfer, EOF means the body is done.
                    Ok(0) => {
                        self.state = BodyReaderState::UntilClosed(true);
                        return Ok(HttpResult::Complete);
                    }

                    Ok(n) => {
                        last_buf.fill(n);
                        continue;
                    }

                    Err(e) => {
                        // BUGFIX: classify I/O failures as ReadError. This
                        // arm mistakenly used ErrorType::ReadEOF, unlike
                        // the other readers (the message already said
                        // "error", not "EOF").
                        return Err(Error::from_error(
                            ErrorType::ReadError,
                            "read until-closed body error",
                            e,
                        ));
                    }
                },
                Err(_) => {
                    return Error::e_explain(
                        ErrorType::ReadTimedout,
                        "read until-closed body timedout",
                    );
                }
            }
        }
    }

    /// Reads the trailer headers that follow the terminating chunk of a
    /// chunked body. Returns `Complete` once `expect_trailers` headers have
    /// been parsed into `self.trailers`.
    async fn read_trailers(
        &mut self,
        hc: &mut BoxHttpConn,
    ) -> Result<HttpResult> {
        if self.expect_trailers == 0 {
            self.read_trailers_done = true;
            return Ok(HttpResult::Complete);
        }

        loop {
            // Idiom fix: borrow immutably for the length check (was
            // `&self.trailers.as_mut().unwrap()`, a `&&mut` borrow).
            let trailers = self.trailers.as_ref().unwrap();
            if self.expect_trailers == trailers.len() {
                self.read_trailers_done = true;
                return Ok(HttpResult::Complete);
            }

            // Parse as many complete "name: value\r\n" lines as are
            // currently buffered.
            loop {
                let read_slice = self.last_buf.read_slice();
                match http_read_crlf_line(read_slice) {
                    Some(size) => {
                        let res = parse_http_header(&read_slice[0..size])?;
                        self.trailers.as_mut().unwrap().append_header_with_kv(
                            Bytes::copy_from_slice(res.0),
                            Bytes::copy_from_slice(res.1),
                        );

                        self.last_buf.consume(size);
                        if self.expect_trailers
                            == self.trailers.as_ref().unwrap().len()
                        {
                            self.read_trailers_done = true;
                            return Ok(HttpResult::Complete);
                        }

                        continue;
                    }

                    // no more data
                    None => {
                        // This bound guarantees that a residual partial
                        // line always fits into the fresh buffer allocated
                        // below.
                        if read_slice.len()
                            > MIN_BODY_BUFFER_SIZE - BODY_BUFFER_MIN_WRITE_SIZE
                        {
                            return Error::e_explain(
                                ErrorType::BigHeaderLine,
                                "found big trailers header line",
                            );
                        }
                        break;
                    }
                }
            }

            // The writable space left in the buffer is too small; swap in a
            // fresh buffer.
            let write_slice_len = self.last_buf.write_slice_len();
            if write_slice_len < BODY_BUFFER_MIN_WRITE_SIZE {
                // BUGFIX: carry the residual (partial header line) bytes
                // over into the new buffer, as do_chunked_read does. They
                // were previously discarded, which corrupted trailer
                // parsing whenever a line straddled a buffer swap. The size
                // check above guarantees the residue fits.
                let read_slice = self.last_buf.read_slice();
                let mut new_buf = FixedBuffer::new(MIN_BODY_BUFFER_SIZE);
                if !read_slice.is_empty() {
                    new_buf.put_slice(read_slice);
                }
                self.last_buf = new_buf;
            }

            let last_buf = &mut self.last_buf;
            let write_slice = last_buf.write_slice();
            let read_event = hc.read(write_slice);
            let read_event = timeout(
                Duration::from_millis(self.read_timeout_ms as u64),
                read_event,
            );

            match read_event.await {
                Ok(res) => match res {
                    Ok(0) => {
                        return Error::e_explain(
                            ErrorType::ReadEOF,
                            "read trailer headers EOF",
                        );
                    }

                    Ok(n) => {
                        last_buf.fill(n);
                        continue;
                    }

                    Err(e) => {
                        return Err(Error::from_error(
                            ErrorType::ReadError,
                            "read trailer headers error",
                            e,
                        ));
                    }
                },

                Err(_) => {
                    return Error::e_explain(
                        ErrorType::ReadTimedout,
                        "read trailer headers timedout",
                    );
                }
            }
        }
    }

    /// Returns the trailer headers once they have been fully read.
    ///
    /// Non-chunked transfers never carry trailers, so `Ok(None)` is
    /// returned for them; asking before the trailers are read is an
    /// internal error.
    pub fn get_trailers(&self) -> Result<Option<&HttpHeaders>> {
        // Only chunked transfer can carry trailer headers.
        match &self.state {
            BodyReaderState::Chunked(_) => {}
            _ => {
                return Ok(None);
            }
        }

        if !self.read_trailers_done {
            // BUGFIX: fixed typo in the error message ("bee" -> "been").
            return Error::e_explain(
                ErrorType::InternalError,
                "The trailer headers has not been read yet",
            );
        }

        Ok(self.trailers.as_ref())
    }

    /// True once the whole body (and, for chunked transfer, its trailers)
    /// has been consumed.
    pub fn done(&self) -> bool {
        match &self.state {
            BodyReaderState::ContentLength(remain) => *remain == 0,
            BodyReaderState::Chunked(_) => self.read_trailers_done,
            BodyReaderState::UntilClosed(done) => *done,
            BodyReaderState::NoBody => true,
        }
    }

    pub fn get_remaining_data(self) -> Bytes {
        self.last_buf.freeze()
    }
}

/// Transfer mode of the body being written, together with its progress.
pub enum BodyWriterState {
    // The message carries no body at all.
    NoBody,
    ContentLength(usize), // remaining bytes still to write
    Chunked(bool),        // write done
    UntilClosed(bool), // write done
}

/// Writes an HTTP message body to a connection according to its transfer
/// encoding: none, Content-Length, chunked, or write-until-close.
#[allow(dead_code)]
pub struct BodyWriter {
    // Current transfer mode and write progress.
    state: BodyWriterState,
    // Timeout applied to each individual write operation.
    write_timeout_ms: usize,

    // for http trailers
    // Number of trailer headers the caller promises to send; 0 means none.
    expect_trailers: usize,
}

#[allow(dead_code)]
impl BodyWriter {
    pub fn with_no_body() -> Self {
        BodyWriter {
            state: BodyWriterState::NoBody,
            write_timeout_ms: 0,
            expect_trailers: 0,
        }
    }

    pub fn with_cl_transfer(cl: usize, mut write_timeout_ms: usize) -> Self {
        write_timeout_ms = write_timeout_ms.max(MIN_BODY_WRITE_TIMEOUT_MS);
        BodyWriter {
            state: BodyWriterState::ContentLength(cl),
            write_timeout_ms,
            expect_trailers: 0,
        }
    }

    pub fn with_chunked_transfer(
        mut write_timeout_ms: usize,
        expect_trailers: usize,
    ) -> Self {
        write_timeout_ms = write_timeout_ms.max(MIN_BODY_WRITE_TIMEOUT_MS);

        BodyWriter {
            state: BodyWriterState::Chunked(false),
            write_timeout_ms,
            expect_trailers,
        }
    }

    pub fn with_until_closed_transfer(mut write_timeout_ms: usize) -> Self {
        write_timeout_ms = write_timeout_ms.max(MIN_BODY_WRITE_TIMEOUT_MS);
        BodyWriter {
            state: BodyWriterState::UntilClosed(false),
            write_timeout_ms,
            expect_trailers: 0,
        }
    }

    /// Dispatches a body-write step to the handler for the current transfer
    /// mode. `done` signals that the caller has no further body data.
    pub async fn write_body(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
        done: bool,
    ) -> Result<HttpResult> {
        match &self.state {
            BodyWriterState::ContentLength(_) => {
                self.do_content_length_write(hc, bufs, done).await
            }
            BodyWriterState::Chunked(_) => {
                self.do_chunked_write(hc, bufs, done).await
            }
            BodyWriterState::UntilClosed(_) => {
                self.do_until_closed_write(hc, bufs, done).await
            }
            // Nothing to send for body-less messages.
            BodyWriterState::NoBody => Ok(HttpResult::Complete),
        }
    }

    /// Writes body data for a Content-Length transfer.
    ///
    /// Data beyond the remaining declared length is silently truncated, and
    /// `bufs` is always fully drained. Returns `Complete` once `remain`
    /// reaches zero, `Partial` otherwise.
    async fn do_content_length_write(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
        done: bool,
    ) -> Result<HttpResult> {
        let mut remain = match self.state {
            BodyWriterState::ContentLength(remain) => remain,
            _ => {
                unreachable!("it's a content-length writer");
            }
        };

        if remain == 0 {
            return Ok(HttpResult::Complete);
        }

        let bufs_len = bufs.len();
        // bufs may be empty: the caller may only want to pass the done flag.
        if bufs_len == 0 {
            // nothing
        } else if bufs_len == 1 {
            // Single buffer: plain write_all, truncated to `remain`.
            let mut slice: &[u8] = &bufs[0];
            if remain >= slice.len() {
                remain -= slice.len();
            } else {
                slice = &slice[0..remain];
                remain = 0;
            }

            let write_event = hc.write_all(slice);
            let write_event = timeout(
                Duration::from_millis(self.write_timeout_ms as u64),
                write_event,
            );

            write_event
                .await
                .if_err(
                    ErrorType::WriteTimedout,
                    "write content-length body timedout",
                )?
                .if_err(
                    ErrorType::WriteError,
                    "write content-length body error",
                )?;
        } else {
            // Multiple buffers: gather into IoSlices (truncated to
            // `remain`) and write them all in one vectored call.
            let mut tmp: Vec<IoSlice<'_>> = Vec::with_capacity(bufs_len);
            for v in bufs.iter() {
                if remain >= v.len() {
                    tmp.push(IoSlice::new(v));
                    remain -= v.len();
                    continue;
                }

                if remain == 0 {
                    break;
                }

                tmp.push(IoSlice::new(&v[0..remain]));
                remain = 0;
                break;
            }

            let write_event = hc.write_vectored_all(&tmp);
            let write_event = timeout(
                Duration::from_millis(self.write_timeout_ms as u64),
                write_event,
            );

            write_event
                .await
                .if_err(
                    ErrorType::WriteTimedout,
                    "write content-length body timedout",
                )?
                .if_err(
                    ErrorType::WriteError,
                    "write content-length body error",
                )?;
        }

        bufs.clear();

        // The caller claims the body is finished, but fewer bytes than the
        // declared Content-Length have been written.
        if done && remain != 0 {
            return Error::e_explain(
                ErrorType::InternalError,
                "Unexpected body done flag",
            );
        }

        self.state = BodyWriterState::ContentLength(remain);

        if remain == 0 {
            return Ok(HttpResult::Complete);
        }

        Ok(HttpResult::Partial)
    }

    /// Writes the contents of `bufs` as one chunk: a hex size line, the
    /// payload, and a trailing CRLF. When `done` is set, also emits the
    /// terminating zero-size chunk ("0\r\n\r\n").
    async fn do_chunked_write(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
        done: bool,
    ) -> Result<HttpResult> {
        match self.state {
            // Renamed from `done` to avoid shadowing the parameter.
            BodyWriterState::Chunked(finished) => {
                if finished {
                    return Ok(HttpResult::Complete);
                }
            }
            _ => {
                unreachable!("it's a chunked writer");
            }
        }

        let mut len = 0;
        for v in bufs.iter() {
            len += v.len();
        }

        if len > 0 {
            let mut buf = BytesMut::with_capacity(128);
            buf.put_slice(format!("{:x}", len).as_bytes());
            buf.put_slice(b"\r\n");
            // push chunked size line
            bufs.push_front(buf.freeze());
            // push after data crlf.
            bufs.push_back(Bytes::from_static(b"\r\n"));
        }

        // Append the last (zero-size) chunk.
        // NOTE(review): "0\r\n\r\n" already contains the CRLF that ends the
        // trailer section; if trailers are then written via
        // write_trailers(), they appear after that terminator — confirm how
        // expect_trailers > 0 is meant to interact with this.
        if done {
            bufs.push_back(Bytes::from_static(b"0\r\n\r\n"));
        }

        let bufs_len = bufs.len();

        if bufs_len == 0 {
            // nothing
        } else if bufs_len == 1 {
            let write_event = hc.write_all(&bufs[0]);
            let write_event = timeout(
                Duration::from_millis(self.write_timeout_ms as u64),
                write_event,
            );

            write_event
                .await
                .if_err(ErrorType::WriteTimedout, "write chunked body timeout")?
                .if_err(ErrorType::WriteError, "write chunked body error")?;
        } else {
            let mut tmp: Vec<IoSlice<'_>> = Vec::with_capacity(bufs_len);
            for v in bufs.iter() {
                tmp.push(IoSlice::new(v));
            }

            // BUGFIX: use write_vectored_all, as the content-length writer
            // does. Plain write_vectored may perform a short write, and
            // because `bufs` is cleared below the unwritten tail would be
            // silently dropped, corrupting the chunked framing.
            let write_event = hc.write_vectored_all(&tmp);
            let write_event = timeout(
                Duration::from_millis(self.write_timeout_ms as u64),
                write_event,
            );

            write_event
                .await
                .if_err(ErrorType::WriteTimedout, "write chunked body timeout")?
                .if_err(ErrorType::WriteError, "write chunked body error")?;
        }

        bufs.clear();

        if done {
            self.state = BodyWriterState::Chunked(true);
            return Ok(HttpResult::Complete);
        }

        Ok(HttpResult::Partial)
    }

    /// Writes `bufs` verbatim for until-closed transfer; `done` marks the
    /// body as fully written.
    async fn do_until_closed_write(
        &mut self,
        hc: &mut BoxHttpConn,
        bufs: &mut VecDeque<Bytes>,
        done: bool,
    ) -> Result<HttpResult> {
        match self.state {
            // Renamed from `done` to avoid shadowing the parameter.
            BodyWriterState::UntilClosed(finished) => {
                if finished {
                    return Ok(HttpResult::Complete);
                }
            }
            _ => {
                unreachable!("it's a until-closed writer");
            }
        }

        let bufs_len = bufs.len();

        // bufs may be empty: the caller may only want to pass the done flag.
        if bufs_len == 0 {
            // nothing
        } else if bufs_len == 1 {
            let write_event = hc.write_all(&bufs[0]);
            let write_event = timeout(
                Duration::from_millis(self.write_timeout_ms as u64),
                write_event,
            );

            write_event
                .await
                .if_err(
                    ErrorType::WriteTimedout,
                    "write until-closed body timedout",
                )?
                .if_err(
                    ErrorType::WriteError,
                    "write until-closed body error",
                )?;
        } else {
            let mut tmp: Vec<IoSlice<'_>> = Vec::with_capacity(bufs_len);
            for v in bufs.iter() {
                tmp.push(IoSlice::new(v));
            }

            // BUGFIX: use write_vectored_all, as the content-length writer
            // does. Plain write_vectored may perform a short write, and
            // because `bufs` is cleared below the unwritten tail would be
            // silently dropped.
            let write_event = hc.write_vectored_all(&tmp);
            let write_event = timeout(
                Duration::from_millis(self.write_timeout_ms as u64),
                write_event,
            );

            write_event
                .await
                .if_err(
                    ErrorType::WriteTimedout,
                    "write until-closed body timeout",
                )?
                .if_err(
                    ErrorType::WriteError,
                    "write until-closed body error",
                )?;
        }

        bufs.clear();

        if done {
            self.state = BodyWriterState::UntilClosed(true);
            return Ok(HttpResult::Complete);
        }

        Ok(HttpResult::Partial)
    }

    /// True once the whole body has been written out.
    pub fn done(&self) -> bool {
        match &self.state {
            BodyWriterState::ContentLength(remain) => *remain == 0,
            BodyWriterState::Chunked(done)
            | BodyWriterState::UntilClosed(done) => *done,
            BodyWriterState::NoBody => true,
        }
    }

    pub async fn write_trailers(
        &mut self,
        hc: &mut BoxHttpConn,
        headers: &HttpHeaders,
    ) -> Result<()> {
        match self.state {
            BodyWriterState::Chunked(done) => {
                if !done {
                    return Error::e_explain(
                        ErrorType::InternalError,
                        "response body has not been sent completely",
                    );
                }
            }
            _ => {
                return Error::e_explain(
                    ErrorType::InternalError,
                    "expect a chunked writer",
                );
            }
        }

        if headers.len() != self.expect_trailers {
            return Error::e_explain(
                ErrorType::InvalidTrailers,
                format!(
                    "expect resp trailers header num: {}, but got none",
                    self.expect_trailers
                ),
            );
        }

        if self.expect_trailers == 0 {
            return Ok(());
        }

        let mut buf = BytesMut::with_capacity(256);
        for v in headers.iter() {
            // name: value\r\n
            buf.put_slice(v.get_name());
            buf.put_slice(b": ");
            buf.put_slice(v.get_value());
            buf.put_slice(b"\r\n");
        }

        let buf = buf.freeze();
        let write_event = hc.write_all(&buf);
        let write_event = timeout(
            Duration::from_millis(self.write_timeout_ms as u64),
            write_event,
        );

        write_event
            .await
            .if_err(ErrorType::WriteTimedout, "write chunked body timeout")?
            .if_err(ErrorType::WriteError, "write chunked body error")
    }
}
