/***
    Amrita ITEWS - Copyright (C) 2009 Amrita Vishwa Vidyapeetham, Amritapuri.
                                       (http://www.amrita.edu)
    ***************************************************************************
    This file is part of the Amrita ITEWS distribution.
    Amrita ITEWS is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License (version 2) as published
    by the Free Software Foundation AND MODIFIED BY the Amrita ITEWS exception.
    ***NOTE*** The exception to the GPL is included to allow you to distribute
    a combined work that includes Amrita ITEWS without being obliged to provide
    the source code for proprietary components outside of the Amrita ITEWS
    software. Amrita ITEWS is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
    Public License for more details. You should have received a copy of the GNU
    General Public License and the Amrita ITEWS license exception along with
    Amrita ITEWS if not then it can be viewed here:
    http://itews.amrita.ac.in/license.html.

    Note: This software is closely coupled to FreeRTOS and UIP and hence the
    licensing terms of FreeRTOS and UIP would apply.


    Documentation, latest information, license and contact details are at:
    http://itews.amrita.ac.in/


    Amrita ITEWS source code can be found at:
    http://code.google.com/p/itews


    The initial code-base of Amrita ITEWS was developed by Amrita Vishwa
    Vidyapeetham as part of the project titled,"Intelligent & Interactive
    Telematics using Emerging Wireless Technologies for Transport Systems
    (ITEWS)" sponsored by Technology Information Forecasting and Assessment
    Council (TIFAC), India.
***/

/*
A socket interface to UIP running as a TCP/IP server task under FreeRTOS.  Developed
by Amrita Research Labs as part of ITEWS project.
*/
#include <typedefs.h>
#include <FreeRTOS.h>
#include <timer.h>
#include <task.h>
#include <queue.h>
#include <uip.h>
#include <mutex_api.h>
#include <socket.h>
#include <uip_task.h>
#include <errno.h>
#include <klog.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <dgram_eventq.h>

#define SOCKET_CLOSED                       0x0000
#define SOCKET_OPEN                         0x0001
#define SOCKET_LISTEN                       0x0002
#define SOCKET_ACTIVE                       0x0004
#define SOCKET_READ_PENDING                 0x0008
#define SOCKET_IBUF_PENDING                 0x0010
#define SOCKET_WRITE_PENDING                0x0020
#define SOCKET_CLOSE_PENDING                0x0040
#define SOCKET_PENDING_UIP_CONNECT          0x0080
#define SOCKET_PENDING_UIP_LISTEN           0x0100
#define SOCKET_ZOMBIE                       0x0200
#define SOCKET_WRITE_ACK_WAIT               0x0400
#define SOCKET_UDP_SEND_TO_PENDING          0x0800
#define SOCKET_PENDING_UIP_UDP_CONNECT      0X1000

extern struct uip_eth_addr uip_ethaddr;

static socket_t sockets[SOCKET_MAX];

static socket_t *lh_open;

#define SOCKET_CHECK_BIND    0x01
#define SOCKET_CHECK_ZOMBIE  0x02

int32 _socket_lock_and_check(int fd, uint32 check);
void _socket_list_add(socket_t **list, socket_t *p_list_head);
void socket_list_remove_(socket_t **list, socket_t *p_list_head);
void socket_ip_aton_(uint8 *ipbuf, int ipbuf_sz, const char *ipstr);
void socket_ip_ntoa_(char *ipstr, int ipstr_sz, const uint8 *ipbuf);

int32 _socket_buf_copy(void *buf, socket_buf_t *sbuf, uint32 len);

/*
 * Note: functions ending in '_' are called by the uip_task
 * functions starting with '_' are called by all application tasks
 * functions starting and ending with '_' are called by uip_task as well as
 * all application tasks
 */

/*
 * Initialise the socket layer: parse the dotted-quad addresses, start
 * the uip task and create one mutex per socket slot.
 *
 * ip/netmask/dr are dotted-quad strings; mac is a raw byte array.
 *
 * Returns 0 on success, or the (negative) status of the first failing
 * mutex_create(). Each failure is logged and initialisation of the
 * remaining slots is still attempted, but the error is no longer
 * silently swallowed as in the previous version.
 */
int32 socket_init(const char *ip, const char *netmask, const uint8 *mac, const char* dr)
{
    int i;
    uint8   ipbuf[4];
    uint8   drbuf[4];
    uint8   netmaskbuf[4];
    int status;
    int32 first_error = 0;

    socket_ip_aton_(ipbuf, sizeof(ipbuf), ip);
    socket_ip_aton_(drbuf, sizeof(drbuf), dr);

    socket_ip_aton_(netmaskbuf, sizeof(netmaskbuf), netmask);

    uip_task_init(ipbuf, netmaskbuf, mac, drbuf);

    for(i=0;i<SOCKET_MAX;i++)
    {
         if((status=mutex_create(&sockets[i].mutex)) < 0)
         {
            klog_error("%s:%d> Can't create mutex for socket: %d. Error: %ld\n"
                    , __FILE__, __LINE__, i, (long)status);
            if(first_error == 0)
            {
                first_error = status;//remember the first failure
            }
         }
    }
    return first_error;
}


int32 _socket_open()
{
    int32 i;
    int   got_socket=0;

    for(i=0;i<SOCKET_MAX;i++)
    {
        if(SOCKET_CLOSED == sockets[i].state)
        {
            //we have a closed socket
            //now lock and check again
            //to avoid race condition
            mutex_lock(sockets[i].mutex);
            if(SOCKET_CLOSED == sockets[i].state)
            {
                //ok now we're sure
                got_socket=1;
                sockets[i].conn = (struct uip_conn*)0UL;
                sockets[i].udp_conn = (struct uip_udp_conn*)0UL;
                sockets[i].state = SOCKET_OPEN;
                sockets[i].rw_timer.interval = (clock_time_t)-1;
                taskENTER_CRITICAL();
                {
                    _socket_list_add(&lh_open, &sockets[i]);
                }
                taskEXIT_CRITICAL();
            }
            mutex_unlock(sockets[i].mutex);
            if(got_socket)
            {
                break;
            }
        }
    }

    if( i >= SOCKET_MAX )
    {
        i = -1;
    }

    return i;
}

/*
 * Bind socket 'fd' to local port 'port' (host byte order).
 * Returns 0 on success or a negative error code.
 */
int32 _socket_bind(int fd, uint16 port)
{
    int32 status;

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    sockets[fd].lport = port;

    mutex_unlock(sockets[fd].mutex);
    return 0;
}

int32 _socket_listen(int fd)
{
    int32 status;
    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE|SOCKET_CHECK_BIND);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    sockets[fd].state |= SOCKET_LISTEN;

    mutex_unlock(sockets[fd].mutex);
    return 0;
}

/*
 * Block until a remote peer connects to this listening socket.
 * The uip task fills 'buf' with the peer's address/port when the
 * connection is established (see socket_process_new_connect_()) and
 * wakes this task with the final status.
 * Returns 0 on success or a negative error code.
 */
int32 _socket_accept(int fd, stream_param_t *buf)
{
    int32 status;

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE|SOCKET_CHECK_BIND);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    sockets[fd].req_ws = CURR_WAIT_STRUCT();
    sockets[fd].req_buf = buf;

    sockets[fd].state |= SOCKET_LISTEN;
    sockets[fd].state |= SOCKET_PENDING_UIP_LISTEN;//uip task will call uip_listen()

    mutex_unlock(sockets[fd].mutex);
    TASK_WAIT(CURR_WAIT_STRUCT(), &status);

    return status;
}

/*
 * Initiate a TCP connection to the peer described by 'buf' (ip/port)
 * and block until the uip task reports success or failure.
 * Returns 0 on success or a negative error code.
 */
int32 _socket_connect(int fd, stream_param_t *buf)
{
    int32 status;

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE);
    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    sockets[fd].req_ws = CURR_WAIT_STRUCT();
    sockets[fd].req_buf = buf;//uip task parses ip/port from here

    sockets[fd].state |= SOCKET_PENDING_UIP_CONNECT;

    mutex_unlock(sockets[fd].mutex);
    TASK_WAIT(CURR_WAIT_STRUCT(), &status);

    return status;
}

/*
 * Read up to 'len' bytes from socket 'fd' into 'buf'.
 *
 * If buffered input (ibuf) is pending it is returned immediately;
 * otherwise the caller blocks until data arrives, the connection dies,
 * or the optional read timeout (STREAM_OPT_READ_TIMEOUT) expires.
 * A zombie socket may still be drained of buffered data.
 *
 * Returns the number of bytes copied, or a negative error code
 * (-EBADFD, -EPIPE, -ETIMEDOUT, ...).
 */
int32 _socket_read(int fd, uint8 *buf, int32 len)
{
    int32 status;

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlocks below never index out of
    //bounds or unlock a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    //for reading, we don't mind if it's a zombie - we
    //can read whatever is in the ibuf - if any
    status = _socket_lock_and_check(fd, 0);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    if(!(sockets[fd].state&SOCKET_IBUF_PENDING))
    {
        //if no ibufs are pending and if this is already a zombie
        //we'll not try blocking read
        if(sockets[fd].state&SOCKET_ZOMBIE)
        {
            mutex_unlock(sockets[fd].mutex);
            return -EPIPE;
        }

        sockets[fd].req_buf = buf;
        sockets[fd].req_count = len;
        sockets[fd].req_ws = CURR_WAIT_STRUCT();
        if(sockets[fd].rw_timer.interval != (clock_time_t)-1)
        {
            timer_restart(&sockets[fd].rw_timer);//arm the read timeout
        }
        sockets[fd].state |= SOCKET_READ_PENDING;
        mutex_unlock(sockets[fd].mutex);
        TASK_WAIT(CURR_WAIT_STRUCT(), &status);
        //again lock the socket
        if(status >= 0)
        {
            status = _socket_lock_and_check(fd, 0);
        }else
        {
            return status;//read failed - so go back
        }
    }

    if(status >= 0)
    {
        status = -EPIPE;
        if(sockets[fd].state&SOCKET_IBUF_PENDING)
        {
            status = _socket_buf_copy(buf, &sockets[fd].ibuf, len);
            if(sockets[fd].ibuf.count == 0)
            {
                //ibuf drained - uip may deliver the next segment
                sockets[fd].state &= ~SOCKET_IBUF_PENDING;
            }
        }
    }
    mutex_unlock(sockets[fd].mutex);

    return status;
}

/*
 * Write 'len' bytes from 'buf' to socket 'fd'.
 * Blocks until the uip task has streamed (and had acked) the whole
 * request, the connection dies, or the optional write timeout expires.
 * Returns the status reported by the uip task (0 on success) or a
 * negative error code.
 */
int32 _socket_write(int fd, uint8 *buf, int32 len)
{
    int32 status;

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    sockets[fd].req_buf = buf;
    sockets[fd].req_count = len;
    sockets[fd].close_status = 0;//clear any stale close reason
    sockets[fd].req_ws = CURR_WAIT_STRUCT();
    sockets[fd].state |= SOCKET_WRITE_PENDING;

    if(sockets[fd].rw_timer.interval != (clock_time_t)-1)
    {
        timer_restart(&sockets[fd].rw_timer);//arm the write timeout
    }

    mutex_unlock(sockets[fd].mutex);
    TASK_WAIT(CURR_WAIT_STRUCT(), &status);

    return status;
}

/*
 * Create a uip UDP connection for socket 'fd' bound to the remote
 * ip/port in 'param', blocking until the uip task has processed the
 * request. Returns 0 on success or a negative error code.
 *
 * Note: 'msg' lives on this task's stack; that is safe because
 * TASK_WAIT keeps this frame alive until the uip task has consumed it.
 */
int32 _socket_udp_connect(int fd, dgram_param_t *param)
{
    int32 status;
    socket_udp_msg_t msg;

    socket_ip_aton_(msg.ipbuf, sizeof(msg.ipbuf), param->ip);

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE);
    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    msg.port = param->port;

    sockets[fd].req_buf = (void*)&msg;
    sockets[fd].req_count = sizeof(msg);
    sockets[fd].req_ws = CURR_WAIT_STRUCT();
    sockets[fd].state |= SOCKET_PENDING_UIP_UDP_CONNECT;

    mutex_unlock(sockets[fd].mutex);

    TASK_WAIT(CURR_WAIT_STRUCT(), &status);

    return status;
}

/*
 * Send one UDP datagram of 'len' bytes from 'buf' to the ip/port in
 * 'param'. Blocks until the uip task has transmitted it (datagrams are
 * truncated to UIP_APPDATA_SIZE by the uip task), the connection dies,
 * or the optional timeout expires.
 * Returns the number of bytes sent or a negative error code.
 *
 * Note: 'msg' lives on this task's stack; that is safe because
 * TASK_WAIT keeps this frame alive until the uip task has consumed it.
 */
int32 _socket_udp_send_to(int fd, uint8 *buf, int32 len, dgram_param_t *param)
{
    int32 status;
    socket_udp_msg_t msg;

    socket_ip_aton_(msg.ipbuf, sizeof(msg.ipbuf), param->ip);

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    msg.port = param->port;
    msg.buf = buf;
    msg.count = len;

    sockets[fd].req_buf = (void*)&msg;
    sockets[fd].req_count = sizeof(msg);
    sockets[fd].close_status = 0;//clear any stale close reason
    sockets[fd].req_ws = CURR_WAIT_STRUCT();
    sockets[fd].state |= SOCKET_UDP_SEND_TO_PENDING;

    if(sockets[fd].rw_timer.interval != (clock_time_t)-1)
    {
        timer_restart(&sockets[fd].rw_timer);//arm the send timeout
    }

    mutex_unlock(sockets[fd].mutex);
    TASK_WAIT(CURR_WAIT_STRUCT(), &status);

    return status;
}

/*
 * Set a socket option.
 *  STREAM_OPT_ADDR_REUSE   - accepted but currently a no-op
 *  STREAM_OPT_READ_TIMEOUT - 'val' points to a clock_time_t timeout
 *                            applied to blocking read/write/sendto
 * Returns 0 on success, -EINVAL for an unknown option, or another
 * negative error code. (The old version returned 0 for unknown
 * options and initialised status with a positive EINVAL.)
 */
int32 _socket_setopt(int fd, int32 opt, void *val)
{
    int32 status;

    //_socket_lock_and_check() does not take the mutex when fd is out of
    //range; validate here so the unlock below never indexes out of
    //bounds or unlocks a mutex that was never taken
    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    status = _socket_lock_and_check(fd, SOCKET_CHECK_ZOMBIE);

    if(status < 0)
    {
        mutex_unlock(sockets[fd].mutex);
        return status;
    }

    switch(opt)
    {
        case STREAM_OPT_ADDR_REUSE:
            break;
        case STREAM_OPT_READ_TIMEOUT:
            timer_set(&sockets[fd].rw_timer, *((clock_time_t*)val));
            break;
        default:
            status = -EINVAL;//unknown option is now reported
            break;
    }

    mutex_unlock(sockets[fd].mutex);

    return status;
}

/*
 * This is the socket_close method called by application tasks
 *
 * Asks the uip task to close the connection (SOCKET_CLOSE_PENDING),
 * waits for completion, then clears the input buffer and releases the
 * slot for reuse. An already-zombie socket is released immediately.
 * Returns 0, or -EBADFD for an out-of-range fd.
 */
int32 _socket_close(int fd)
{
    int32 status;

    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;
    }

    mutex_lock(sockets[fd].mutex);

    if(!(sockets[fd].state&SOCKET_ZOMBIE))
    {
        sockets[fd].req_ws = CURR_WAIT_STRUCT();
        sockets[fd].state |= SOCKET_CLOSE_PENDING;

        mutex_unlock(sockets[fd].mutex);

        TASK_WAIT(CURR_WAIT_STRUCT(), &status);

        //re-acquire the mutex before releasing the slot: the previous
        //version cleared the state without holding the lock, racing
        //with the uip task and with tasks scanning for free sockets
        mutex_lock(sockets[fd].mutex);
        //clear ibuf and release the socket
        sockets[fd].ibuf.pos = 0;
        sockets[fd].ibuf.count = 0;
        sockets[fd].state = SOCKET_CLOSED;
        mutex_unlock(sockets[fd].mutex);
    }else
    {
        //this is already a zombie - so clear ibuf and release it
        sockets[fd].ibuf.pos = 0;
        sockets[fd].ibuf.count = 0;
        sockets[fd].state = SOCKET_CLOSED;
        mutex_unlock(sockets[fd].mutex);
    }

    return 0;
}

/*
 * Lock the socket mutex and do basic validation
 *
 * 'check' selects additional validations:
 *   SOCKET_CHECK_ZOMBIE - fail if the socket was internally closed,
 *                         returning the preserved close_status
 *   SOCKET_CHECK_BIND   - fail if no local port has been set
 *
 * Locking contract (important for callers):
 *   - fd out of range      -> returns -EBADFD, mutex NOT taken
 *   - any other failure    -> returns a negative code, mutex HELD
 *   - success (returns 0)  -> mutex HELD
 * Callers must therefore not unlock on the out-of-range path.
 */
int32 _socket_lock_and_check(int fd, uint32 check)
{
    int32 status;

    if(fd < 0 || fd >= SOCKET_MAX)
    {
        return -EBADFD;//NOTE: mutex is NOT locked on this path
    }

    mutex_lock(sockets[fd].mutex);

    if(!(sockets[fd].state&SOCKET_OPEN))
    {
        status = -EBADFD;
        return status;//mutex intentionally left held - caller unlocks
    }

    if((check&SOCKET_CHECK_ZOMBIE) &&
            (sockets[fd].state&SOCKET_ZOMBIE) )
    {
        //the socket was closed owing to some internal error
        //socket status was preserved so that it can be given to the application
        //save the status on the local stack
        status = sockets[fd].close_status;

        if(status >= 0)//no proper error code
        {
            status = -ESTALE;
        }

        return status;//mutex intentionally left held - caller unlocks

    }

    if((check&SOCKET_CHECK_BIND) && (sockets[fd].lport == 0))
    {
        return -EINVAL;//TODO - not bound error
    }
    return 0;
}

/*
 * This is the actual socket_close method - invoked by the uip_task
 *
 * Detaches the socket from its uip (TCP and/or UDP) connection, marks
 * it as a zombie - the slot is kept until the application collects the
 * close status - unlinks it from the open list and wakes any task
 * blocked in a read/write/sendto/close call.
 *
 * 'status' is the close reason (0 for an application-requested close,
 * negative errno-style code otherwise); it is preserved in
 * close_status for later retrieval via _socket_lock_and_check().
 *
 * MUST be called with sock->mutex already held.
 */
void socket_close_(socket_t *sock, int32 status)
{
    wait_struct_t *saved_ws;

    //this is always called when socket mutex is locked
    //so if we do it again here it'll cause deadlock

    sock->req_buf = (void*)0UL;//can't do any more writes
    sock->req_count = 0;
    sock->close_status = status;
    if(sock->conn != (struct uip_conn*)0UL)
    {
        //break both directions of the socket<->uip association
        sock->conn->appstate = (socket_t*)0UL;
        sock->conn=(struct uip_conn *)0UL;
    }

    if(sock->udp_conn != (struct uip_udp_conn*)0UL)
    {
        sock->udp_conn->appstate = (socket_t*)0UL;
        sock->udp_conn=(struct uip_udp_conn *)0UL;
    }

    //the socket can't be reallocated till application
    //gets the close status
    saved_ws = sock->req_ws;//capture before clearing so we can still notify
    sock->req_ws = (wait_struct_t*)0UL;
    sock->state |= SOCKET_ZOMBIE;

    //critical section guards against a concurrent _socket_open()
    //pushing onto lh_open while we unlink
    taskENTER_CRITICAL();
    {
        socket_list_remove_(&lh_open, sock);
    }
    taskEXIT_CRITICAL();

    //wake the application only if it is actually blocked on an
    //operation; saved_ws is presumably non-NULL whenever one of these
    //pending flags is set - TODO confirm
    if(sock->state&(SOCKET_READ_PENDING|SOCKET_WRITE_PENDING|SOCKET_UDP_SEND_TO_PENDING|SOCKET_CLOSE_PENDING ))
    {
        TASK_NOTIFY(saved_ws, &status);
    }

}

void socket_process_pending_lists_()
{
    stream_param_t *param;
    socket_udp_msg_t *udp_msg;
    uint8           ipbuf[4];
    uip_ipaddr_t    ipaddr;
    socket_t        *sock=lh_open;
    int32           status;
    struct uip_conn *conn;
    struct uip_udp_conn *udp_conn;

    while(sock)
    {
        mutex_lock(sock->mutex);

        if(sock->state&SOCKET_CLOSE_PENDING)
        {
            socket_close_(sock, 0);//proper close
        }else
        if(sock->state&SOCKET_PENDING_UIP_LISTEN)
        {
            sock->state &= ~SOCKET_PENDING_UIP_LISTEN;
            uip_listen(HTONS(sock->lport));
        }else
        if(sock->state&SOCKET_PENDING_UIP_CONNECT)
        {
            sock->state &= ~SOCKET_PENDING_UIP_CONNECT;
            param = (stream_param_t *)sock->req_buf;
            socket_ip_aton_(ipbuf, sizeof(ipbuf), param->ip);
            uip_ipaddr(ipaddr, ipbuf[0], ipbuf[1], ipbuf[2], ipbuf[3]);
            conn = uip_connect(&ipaddr, htons(param->port));
            if(conn == (struct uip_conn*)0UL)
            {
                status = -EINVAL;
                TASK_NOTIFY(sock->req_ws, &status);
            }else
            {
                conn->appstate = sock;
                sock->conn = conn;
                sock->lport = htons(conn->lport);
                sock->state |= SOCKET_ACTIVE;//we're active now
            }
        }else
        if(sock->state&SOCKET_PENDING_UIP_UDP_CONNECT)
        {
            sock->state &= ~SOCKET_PENDING_UIP_UDP_CONNECT;
            udp_msg = (socket_udp_msg_t *)sock->req_buf;
            uip_ipaddr(ipaddr, udp_msg->ipbuf[0], udp_msg->ipbuf[1], udp_msg->ipbuf[2], udp_msg->ipbuf[3]);
            udp_conn = uip_udp_new(&ipaddr, htons(udp_msg->port));
            if(udp_conn == (struct uip_udp_conn*)0UL)
            {
                status = -EINVAL;
            }else
            {
                udp_conn->appstate = sock;
                sock->udp_conn = udp_conn;
                sock->lport = htons(udp_conn->lport);
                sock->state |= SOCKET_ACTIVE;//we're active now
            }
            TASK_NOTIFY(sock->req_ws, &status);
        }else
        if(sock->state&SOCKET_READ_PENDING)
        {
            if(     (sock->rw_timer.interval != (clock_time_t)-1)
               &&   timer_expired(&sock->rw_timer) )
            {
                status = -ETIMEDOUT;
                sock->state &= ~SOCKET_READ_PENDING;
                TASK_NOTIFY(sock->req_ws, &status);
            }else
            if(sock->state&SOCKET_IBUF_PENDING)
            {
                //This task blocked on read_pending just before it got a buffer
                //so we need to wake up the task
                status = 0;
                sock->state &= ~SOCKET_READ_PENDING;
                TASK_NOTIFY(sock->req_ws, &status);
            }
        }else
        if( (sock->state&SOCKET_WRITE_PENDING)||(sock->state&SOCKET_UDP_SEND_TO_PENDING))
        {
            if( (sock->rw_timer.interval != (clock_time_t)-1)
               &&   timer_expired(&sock->rw_timer) )
            {
                status = -ETIMEDOUT;
                sock->state &= ~(SOCKET_WRITE_PENDING|SOCKET_UDP_SEND_TO_PENDING);
                TASK_NOTIFY(sock->req_ws, &status);
            }
        }

        mutex_unlock(sock->mutex);

        sock=sock->next;
    }

}

/*
 * uip callback: a new connection has been established.
 * Finds the passive (listening, not yet active) socket bound to the
 * connection's local port, attaches the uip connection to it, fills in
 * the peer's address/port for a pending _socket_accept() and wakes the
 * waiting task.
 *
 * NOTE(review): on the UDP path only 'sock' is resolved from
 * uip_udp_conn, yet the matching loop below reads uip_conn->lport -
 * presumably uIP raises this event only for TCP connections; confirm.
 */
void socket_process_new_connect_()
{
    stream_param_t  *p;
    int32           status;
    uint8           ipbuf[4];
    socket_t *sock;

    if(uip_udpconnection())
    {
        sock=(socket_t*)uip_udp_conn->appstate;
    }else
    {
        sock=(socket_t*)uip_conn->appstate;
    }

/*TODO: For Active Sockets:
    if(sock != (socket_t *)0UL)
    {
        mutex_lock(sock->mutex);
        status = 0;
        TASK_NOTIFY(sock->req_ws, &status);
        mutex_unlock(sock->mutex);
        return;
    }*/

    //Not an active connect.
    //So check for matching passive listening socket

    sock=lh_open;

    while(sock)
    {
        mutex_lock(sock->mutex);
        if( (!(sock->state&SOCKET_ACTIVE)) &&
            (sock->state&SOCKET_LISTEN) &&
            (htons(sock->lport) == uip_conn->lport)     )
        {
            //bind this uip connection to the matching listener
            uip_conn->appstate = sock;
            sock->conn = uip_conn;
            if(sock->state&SOCKET_LISTEN)//always true here (checked above)
            {
                //report the peer's address/port back to the accept caller
                p = (stream_param_t*)sock->req_buf;
                ipbuf[0] = uip_ipaddr1(uip_conn->ripaddr);
                ipbuf[1] = uip_ipaddr2(uip_conn->ripaddr);
                ipbuf[2] = uip_ipaddr3(uip_conn->ripaddr);
                ipbuf[3] = uip_ipaddr4(uip_conn->ripaddr);

                socket_ip_ntoa_( p->ip, sizeof(p->ip), ipbuf);
                p->port = ntohs(uip_conn->rport);
            }

            sock->state |= SOCKET_ACTIVE;
            status = 0;
            TASK_NOTIFY(sock->req_ws, &status);//wake the task blocked in accept
            mutex_unlock(sock->mutex);
            break;
        }
        mutex_unlock(sock->mutex);
        sock = sock->next;
    }

}

/*
 * uip callback: the remote end closed or reset the connection.
 * Closes the associated socket (if any) with -ECONNRESET.
 */
void socket_process_close_()
{
    socket_t *sock;

    /* Resolve the socket bound to the current uip connection. */
    if(uip_udpconnection())
    {
        sock = (socket_t*)uip_udp_conn->appstate;
    }
    else
    {
        sock = (socket_t*)uip_conn->appstate;
    }

    if(sock == (socket_t*)0UL)
    {
        return;//no socket attached - nothing to tear down
    }

    mutex_lock(sock->mutex);
    socket_close_(sock, -ECONNRESET);
    mutex_unlock(sock->mutex);
}

/*
 * uip callback: the connection timed out.
 * Closes the associated socket (if any and still open) with -ETIMEDOUT.
 */
void socket_process_timeout_()
{
    socket_t *sock;

    /* Resolve the socket bound to the current uip connection. */
    if(uip_udpconnection())
    {
        sock = (socket_t*)uip_udp_conn->appstate;
    }
    else
    {
        sock = (socket_t*)uip_conn->appstate;
    }

    if(sock == (socket_t*)0UL)
    {
        return;//no socket attached - nothing to tear down
    }

    mutex_lock(sock->mutex);
    if(sock->state&SOCKET_OPEN)
    {
        socket_close_(sock, -ETIMEDOUT);
    }
    mutex_unlock(sock->mutex);
}

/*
 * uip callback: decide whether reception may be restarted.
 * Restarts the flow-controlled connection once the socket's input
 * buffer has been consumed; closes connections that no longer map to
 * an active socket.
 */
void socket_process_try_restart_()
{
    socket_t *sock;

    /* Resolve the socket bound to the current uip connection. */
    if(uip_udpconnection())
    {
        sock = (socket_t*)uip_udp_conn->appstate;
    }
    else
    {
        sock = (socket_t*)uip_conn->appstate;
    }

    if(sock == (socket_t*)0UL)
    {
        uip_close();//orphaned connection - drop it
        return;
    }

    mutex_lock(sock->mutex);
    if(sock->state&SOCKET_ACTIVE)
    {
        //only resume reception once the single input buffer is free
        if(!(sock->state&SOCKET_IBUF_PENDING))
        {
            uip_restart();
        }
    }
    else
    {
        uip_close();//socket no longer active - drop the connection
    }
    mutex_unlock(sock->mutex);
}

/*
 * uip callback: the connection can accept (more) output.
 *
 * TCP path: streams req_buf out in chunks of at most uip_mss() bytes.
 * On ack, advances past the acked chunk; on retransmit request, resends
 * the outstanding data; when everything has been acked, clears
 * SOCKET_WRITE_PENDING and wakes the task blocked in _socket_write().
 *
 * UDP path: sends the single datagram described by the socket_udp_msg_t
 * in req_buf (truncated to UIP_APPDATA_SIZE) and wakes the task blocked
 * in _socket_udp_send_to() with the byte count sent.
 */
void socket_process_write_()
{
    int32 status;
    socket_t *sock;
    socket_udp_msg_t *udp_msg;
    uip_ipaddr_t ipaddr;

    if(uip_udpconnection())
    {
        sock=(socket_t*)uip_udp_conn->appstate;
    }else
    {
        sock=(socket_t*)uip_conn->appstate;
    }

    if(sock)
    {
        mutex_lock(sock->mutex);
    }

    if(sock && (sock->state&SOCKET_ACTIVE))
    {
        if(sock->state&SOCKET_WRITE_PENDING)
        {
            if(uip_acked())
            {
                //peer acked the outstanding chunk - advance past it
                //NOTE(review): pointer arithmetic assumes req_buf is a
                //byte pointer in socket_t - confirm its declaration
                sock->req_count -= sock->write_count;
                sock->req_buf += sock->write_count;
                sock->state &= ~SOCKET_WRITE_ACK_WAIT;
            }else
            if(uip_rexmit())
            {
                //uip asked for a retransmission of unacked data
                uip_send(sock->req_buf, sock->req_count);
            }

            if(sock->req_count > 0)
            {
                //more data to send, but only one chunk may be in
                //flight (unacked) at a time
                if(!(sock->state&SOCKET_WRITE_ACK_WAIT))
                {
                    sock->state|= SOCKET_WRITE_ACK_WAIT;
                    if(sock->req_count < uip_mss())
                    {
                        sock->write_count = sock->req_count;
                    }else
                    {
                        sock->write_count = uip_mss();
                    }
                    uip_send(sock->req_buf, sock->write_count);
                }
            }else
            {
                //entire request acked - wake the writer with success
                status = 0;
                sock->state &= ~SOCKET_WRITE_PENDING;
                TASK_NOTIFY(sock->req_ws, &status);
            }

        }else
        if(sock->state&SOCKET_UDP_SEND_TO_PENDING)
        {
            //point the uip UDP connection at the requested destination
            udp_msg = (socket_udp_msg_t*)sock->req_buf;
            uip_ipaddr(ipaddr, udp_msg->ipbuf[0], udp_msg->ipbuf[1], udp_msg->ipbuf[2], udp_msg->ipbuf[3]);
            uip_udp_conn->rport = htons(udp_msg->port);
            uip_ipaddr_copy(&uip_udp_conn->ripaddr, &ipaddr);
            if(udp_msg->count > UIP_APPDATA_SIZE)
            {
                //datagram silently truncated to what uip can carry
                udp_msg->count = UIP_APPDATA_SIZE;
            }
            memcpy(uip_appdata, udp_msg->buf,udp_msg->count);
            uip_udp_send(udp_msg->count);
            status = udp_msg->count;//sender sees the bytes actually sent
            sock->state &= ~SOCKET_UDP_SEND_TO_PENDING;
            TASK_NOTIFY(sock->req_ws, &status);
        }
    }else
    {
        uip_close();//no active socket behind this connection
    }

    if(sock)
    {
        mutex_unlock(sock->mutex);
    }

}


/*
 * uip callback: new data arrived on the connection.
 * Copies the payload into the socket's single input buffer, stops
 * further reception until the buffer is consumed, and wakes any task
 * blocked in _socket_read().
 *
 * Fix: the klog_error() call was missing a comma, silently
 * concatenating the message with __FILE__ and passing __LINE__ with no
 * matching format specifier; it now follows the "%s:%d>" pattern used
 * elsewhere in this file.
 */
void socket_process_new_data_()
{
    socket_t *sock;
    int32 status;

    if(uip_udpconnection())
    {
        sock=(socket_t*)uip_udp_conn->appstate;
    }else
    {
        sock=(socket_t*)uip_conn->appstate;
    }

    if(sock)
    {
        mutex_lock(sock->mutex);
    }

    if(sock && (sock->state&SOCKET_ACTIVE))
    {
        if(!(sock->state&SOCKET_IBUF_PENDING))
        {
            //NOTE(review): assumes ibuf.buf can hold uip_datalen()
            //bytes - verify socket_buf_t is sized for uip's buffer
            memcpy(sock->ibuf.buf, uip_appdata, uip_datalen());
            sock->ibuf.pos = 0;
            sock->ibuf.count = uip_datalen();
            sock->state |= SOCKET_IBUF_PENDING;
            uip_stop();//we can't process any further data till this buffer is consumed
            if(sock->state&SOCKET_READ_PENDING)
            {
                //a reader is blocked waiting for exactly this data
                status = 0;
                sock->state &= ~SOCKET_READ_PENDING;
                TASK_NOTIFY(sock->req_ws, &status);
            }
        }else
        {
            //This condition should not happen - since as soon as we
            //get data and move it to the ibuf we would've done uip_stop
            //till sbuf is free again
            klog_error("%s:%d> Socket read out of sync. Received socket data, but buf is not free!\n"
                    , __FILE__, __LINE__);
            uip_close();
            socket_close_(sock, -ENOBUFS);
        }
    }else
    {
        uip_close();//data for a connection with no active socket
    }

    if(sock)
    {
        mutex_unlock(sock->mutex);
    }

}

/*
 * Parse a dotted-quad IPv4 string ("a.b.c.d") into 4 bytes.
 * Missing trailing octets are left as 0; no validation of the octet
 * values is performed (atoi semantics). Does nothing if ipbuf_sz < 4.
 */
void socket_ip_aton_(uint8 *ipbuf, int ipbuf_sz, const char *ipstr)
{
    int octet;

    if(ipbuf_sz < 4)
    {
        return;
    }

    ipbuf[1] = ipbuf[2] = ipbuf[3] = 0;
    ipbuf[0] = atoi(ipstr);

    for(octet = 1; octet < 4; octet++)
    {
        //advance to the character after the next '.'
        while(*ipstr && *ipstr != '.')
        {
            ipstr++;
        }
        if(*ipstr)
        {
            ipstr++;
        }
        if(*ipstr)
        {
            ipbuf[octet] = atoi(ipstr);
        }
    }
}

/*
 * Format a 4-byte IPv4 address as a dotted-quad string, always NUL
 * terminated (output is truncated if ipstr_sz is too small). Does
 * nothing when ipstr_sz <= 1.
 *
 * Fixes two defects of the previous version:
 *  - a zero tens digit was dropped whenever a hundreds digit had been
 *    printed (e.g. 105 rendered as "15", 100 as "10")
 *  - on truncation the terminating NUL was written one byte past the
 *    end of the buffer
 */
void socket_ip_ntoa_(char *ipstr, int ipstr_sz, const uint8 *ipbuf)
{
    uint8 val;
    uint8 emitted;//non-zero once a more significant digit was written
    int i;

    if(ipstr_sz <= 1)
    {
        return;
    }

    for(i=0;i<4;i++)
    {
        emitted = 0;

        val = ipbuf[i]/100U;
        if(val != 0)
        {
            *ipstr = val + '0';
            ipstr++;
            ipstr_sz--;
            emitted = 1;
            if(ipstr_sz <= 1)//keep one byte for the NUL
            {
                break;
            }
        }

        val = (ipbuf[i]/10U)%10U;
        if(val != 0 || emitted)//a "0" tens digit must follow a hundreds digit
        {
            *ipstr = val + '0';
            ipstr++;
            ipstr_sz--;
            if(ipstr_sz <= 1)
            {
                break;
            }
        }

        *ipstr = (ipbuf[i]%10U) + '0';
        ipstr++;
        ipstr_sz--;
        if(ipstr_sz <= 1)
        {
            break;
        }

        if(i < 3)
        {
            *ipstr = '.';
            ipstr++;
            ipstr_sz--;
            if(ipstr_sz <= 1)
            {
                break;
            }
        }
    }

    *ipstr= '\0';

}

/*
 * Drain up to 'len' bytes from the socket buffer 'sbuf' into 'buf',
 * advancing the buffer's read position.
 * Returns the number of bytes copied (0 if either the buffer or the
 * request is empty).
 */
int32 _socket_buf_copy(void *buf, socket_buf_t *sbuf, uint32 len)
{
    int32 chunk;

    if(len == 0 || sbuf->count == 0)
    {
        return 0;
    }

    //copy whichever is smaller: what is buffered or what was asked for
    chunk = (sbuf->count < len) ? sbuf->count : len;
    memcpy(buf, &sbuf->buf[sbuf->pos], chunk);

    sbuf->pos += chunk;
    sbuf->count -= chunk;

    return chunk;
}


/*
 * Push 'item' onto the front of the singly linked list whose head is
 * *p_list_head. Caller is responsible for any required locking.
 */
void _socket_list_add(socket_t **p_list_head, socket_t *item)
{
    socket_t *old_head;

    old_head = *p_list_head;
    item->next = old_head;
    *p_list_head = item;
}

/*
 * Unlink 'item' from the singly linked list rooted at *p_list_head;
 * item->next is cleared once removed. Harmless if the item is not on
 * the list. Caller is responsible for any required locking.
 *
 * Rewritten as a pointer-to-pointer walk: the previous version
 * dereferenced a NULL head ((*p_list_head)->next) when called on an
 * empty list with a non-head item.
 */
void socket_list_remove_(socket_t **p_list_head, socket_t *item)
{
    socket_t **pp;

    for(pp = p_list_head; *pp != (socket_t*)0UL; pp = &(*pp)->next)
    {
        if(*pp == item)
        {
            *pp = item->next;
            item->next = (socket_t*)0UL;
            break;
        }
    }
}
