/*  Layer 4 RingBuffer
 *  Copyright (C) 2016 ZHANG X. <201560039@uibe.edu.cn>
 *  __ |  \   /   __| 
 *    /     /    |    
 *  ___|  _/ _\ \___| 
 *
 *  **All Rights Reserved**
 */
#ifndef L4RB_H
#define L4RB_H

#include "ling_platform.h"
#include "ringbuffer.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <sys/socket.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/epoll.h>
#include <fcntl.h>
#endif /* HAVE_POSIX */
#ifdef HAVE_WINDOWS
#include <windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#endif /* HAVE_WINDOWS */

#define QUEUE_LEN 32
#define FD_DOWN -1

/*  TODO:
 *  1. Support kqueue
 *  2. Support IOCP
 *  3. WebSocket
 *  4. AppServer
 */
/*  Socket address storage large enough for either an IPv4 or an
 *  IPv6 endpoint; which member is valid depends on the address
 *  family of the accepted/listening socket.  */
union Addr_in{
    struct sockaddr_in ipv4;
    struct sockaddr_in6 ipv6;
};

/*  Per-client connection state.  rb_head/rb_tail index segments of the
 *  shared ring buffer (struct Ring) that belong to this connection
 *  (see l4rb_rx / l4rb_yield below).  */
struct L4conn{
#ifdef HAVE_SSL
    mbedtls_net_context fd;     /* TLS socket context (mbed TLS) */
#else /* HAVE_SSL */
    int fd;                     /* plain socket; FD_DOWN (-1) once closed */
#endif /* HAVE_SSL */
    uint32_t rb_head;           /* first ring segment of this connection */
    uint32_t rb_tail;           /* last ring segment of this connection */
    union Addr_in addr;         /* peer address (IPv4 or IPv6) */
};
/*  Allocated by the caller.    */
struct L4rb{
    /*  Caller allocate.    */
    struct Ring *rb;
#ifdef HAVE_SSL
    mbedtls_net_context listen;
#else /* HAVE_SSL */
    int32_t listen;
#endif /* HAVE_SSL */
    
#ifdef HAVE_EPOLL
    int32_t queue;
    struct epoll_event ev[QUEUE_LEN];
#endif /* HAVE_EPOLL */
    int32_t queue_len;
    uint32_t max_conn;
    uint32_t inten_conn;
    uint32_t new_conn;
    struct L4conn conn[];
};
/*  Create the event queue (epoll instance when HAVE_EPOLL) and size the
 *  connection table; defined in the corresponding .c file.  */
int l4rb_queue_init(struct L4rb *, uint32_t max_conn);
/*  Bind and listen on the given address/port string; defined in the .c.  */
int l4rb_listen(struct L4rb *, char *);
/*  Wait up to `timeout` for events; returns the connection index to
 *  service next — TODO confirm return semantics against the .c file.  */
uint32_t l4rb_poll(struct L4rb *, int timeout);
/*  Gather up to `size` contiguous bytes of received data for the
 *  connection currently in focus (inten_conn).
 *  Returns a pointer into the ring buffer when at least `size` bytes
 *  are available in the collected segment, NULL otherwise.
 *  Note: `static` added — a plain `inline` definition in a header emits
 *  no external definition under C99 semantics, so any non-inlined call
 *  (e.g. at -O0) fails to link.  */
static inline char * l4rb_rx(struct L4rb *l4rb, size_t size){
    uint32_t new_head = ring_collect(l4rb->rb,
            l4rb->conn[l4rb->inten_conn].rb_head, size);
    l4rb->conn[l4rb->inten_conn].rb_head = new_head;
    /*  If the collected segment has no successor it is also the last
     *  segment owned by this connection — keep rb_tail in sync.  */
    if(RING_CONN_NEXT(l4rb->rb, new_head) == NO_SUCCEEDING)
        l4rb->conn[l4rb->inten_conn].rb_tail = new_head;
    if((size_t)RING_SEG_LEN(l4rb->rb, new_head) >= size){
        return RING_SEG_DATA(l4rb->rb, new_head);
    }
    return NULL;
}
/*  Advance inten_conn to the next connection that has pending data,
 *  starting the search after the current connection's tail segment.
 *  No-op when no connection is active.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline void l4rb_yield(struct L4rb *l4rb){
    if(l4rb->inten_conn == INACTIVE_CONN)return;
    l4rb->inten_conn = ring_next_conn(l4rb->rb,
            l4rb->conn[l4rb->inten_conn].rb_tail);
}
/*  Release the event queue (the epoll instance fd when HAVE_EPOLL);
 *  a no-op on back-ends without a queue fd.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline void l4rb_queue_close(struct L4rb *l4rb){
#ifdef HAVE_EPOLL
    close(l4rb->queue);
#endif /*HAVE_EPOLL */
}
/*  Close the listening socket.
 *  NOTE(review): under HAVE_SSL `listen` is an mbedtls_net_context, not
 *  an int fd, so this close() call cannot compile there — it should
 *  presumably use mbedtls_net_free(); confirm against the SSL build.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline void l4rb_close(struct L4rb *l4rb){
    close(l4rb->listen);
}
/*  Close a client  */
/*  Close a client connection: unregister it from the event queue,
 *  close its socket, and mark the slot as free (FD_DOWN).
 *  Idempotent — returns immediately if the slot is already closed.
 *  Fixes: `static` added (plain header `inline` emits no external
 *  definition in C99, breaking non-inlined calls at link time), and the
 *  epoll_ctl call is now guarded — `l4rb->queue` only exists when
 *  HAVE_EPOLL is defined (see struct L4rb), so the unguarded call could
 *  not compile on non-epoll platforms.  */
static inline void _l4rb_dequeue(struct L4rb *l4rb, struct L4conn *l4conn){
    /*  Already closed  */
    if(l4conn->fd == FD_DOWN)return;
#ifdef HAVE_EPOLL
    epoll_ctl(l4rb->queue, EPOLL_CTL_DEL, l4conn->fd, NULL);
#endif /* HAVE_EPOLL */
    close(l4conn->fd);
    l4conn->fd = FD_DOWN;
}
/*  Called when accepting a new client.    */
/*  Register a freshly accepted client with the event queue and return
 *  the connection-slot index chosen for it; on registration failure the
 *  socket is closed and INACTIVE_CONN is returned.
 *  NOTE(review): this body is epoll-only but lacks a HAVE_EPOLL guard,
 *  unlike l4rb_queue_close — it cannot compile on non-epoll platforms;
 *  confirm the intended fallback before adding one.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline uint32_t _l4rb_enqueue(struct L4rb *l4rb, struct L4conn *l4conn){
    /*  l4rb->max_conn = 2^n - 1, so the AND below is
     *  l4rb->new_conn % (l4rb->max_conn + 1) — a cheap modulo.  */
    uint32_t new_conn_n = l4rb->new_conn & l4rb->max_conn;
    struct epoll_event ev
        = {.events = EPOLLIN,
            .data.u32 = new_conn_n};   /* slot index travels in the event */
    if(epoll_ctl(l4rb->queue, EPOLL_CTL_ADD, l4conn->fd, &ev)
            == -1) goto fallback;
    return new_conn_n;
fallback:
    close(l4conn->fd);
    return INACTIVE_CONN;
}

/*  Discard `size` bytes from the head of the focused connection's data
 *  in the ring buffer, advancing its rb_head.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline void l4rb_drop(struct L4rb *l4rb, uint32_t size){
    ring_drop_conn(l4rb->rb, &(l4rb->conn[l4rb->inten_conn].rb_head), size);
}
/*  Tear down the focused connection entirely: drop all of its buffered
 *  data (NO_SUCCEEDING is passed as the size — presumably a "drop all"
 *  sentinel understood by ring_drop_conn; confirm in ringbuffer.h),
 *  close its socket, and move focus to the next connection.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline void l4rb_drop_client(struct L4rb *l4rb){
    l4rb_drop(l4rb, NO_SUCCEEDING);
    _l4rb_dequeue(l4rb, l4rb->conn + l4rb->inten_conn);
    l4rb_yield(l4rb);
}
/*  Return the socket fd of the connection currently in focus.
 *  NOTE(review): under HAVE_SSL `fd` is an mbedtls_net_context, which
 *  cannot convert to int — confirm whether this accessor is meant to
 *  return `.fd.fd` in the SSL build.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline int l4rb_socket(struct L4rb *l4rb){
    return l4rb->conn[l4rb->inten_conn].fd;
}
/*  Send `length` bytes from `buffer` on the focused connection's socket,
 *  retrying on short writes.  Returns the number of bytes left unsent
 *  (0 on full success).
 *  Fixes: (1) the original `for` loop executed `rmn -= sent` even after
 *  send() returned -1, so the "remaining bytes" result was off by one on
 *  error; the error path now breaks before adjusting rmn.
 *  (2) `static` added — plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline size_t l4rb_tx(struct L4rb *l4rb, char *buffer, size_t length){
    size_t rmn = length;
    while(rmn > 0){
        /*  int matches the original; POSIX send() returns ssize_t —
         *  fine for the partial-send sizes used here.  */
        int sent = send(l4rb_socket(l4rb),
                buffer + length - rmn,
                rmn, 0);
        if(sent == -1)
            break;              /* send failed; rmn bytes remain unsent */
        rmn -= (size_t)sent;
    }
    return rmn;
}
/*  Number of bytes available in the focused connection's current head
 *  segment of the ring buffer.
 *  `static` added: plain `inline` in a header emits no external
 *  definition in C99, causing link errors for non-inlined calls.  */
static inline size_t l4rb_length(struct L4rb *l4rb){
    return (size_t)RING_SEG_LEN(l4rb->rb, l4rb->conn[l4rb->inten_conn].rb_head);
}
#endif /* L4RB_H */
