/* spice-server character device to handle a video stream

   Copyright (C) 2017 Red Hat, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "char-device.h"
#include "gpunvidia-channel.h"
#include "cursor-channel.h"
#include "reds.h"
#include "ringbuffer.h"
#include "minilzo.h"
#include <math.h>
#include <sys/queue.h>
#include <zlib.h>

#define BLOCKSIZE 16
#define SCREENWIDTH 2048
#define SCREENHEIGHT 2048
#define COLORDEPTH 4

/* Header of a performance-test request received from the guest.  The
 * payload to be processed follows this header in the same buffer (see
 * gpu_ext_handle_performance_caps()).  Wire format: do not reorder or
 * resize fields. */
typedef struct _PerfReqInfo{
    int index;          /* command index word (dispatch selector) */
    int raw_size;       /* presumably the uncompressed payload size — confirm against guest sender */
    int mon_id;         /* target monitor id */
    int number;         /* request sequence number */
    uint64_t timestamp; /* sender-side timestamp */
}PerfReqInfo;

/* Performance-test result record sent back toward the guest/client side.
 * Wire format: do not reorder or resize fields. */
typedef struct _PerfInfo{
    int number;             /* matches PerfReqInfo.number */
    int raw_size;           /* uncompressed payload size in bytes */
    int compress_size;      /* compressed payload size in bytes */
    int reserv;             /* reserved / padding */
    float compress_cost;    /* compression time (ms) measured by sender */
    float uncompress_cost;  /* decompression time (ms) measured here */
    uint64_t timestamp;     /* echoed request timestamp */
}PerfInfo;

/* Generic device message: fixed header followed by a variable-length
 * payload allocated past the struct.  Uses a C99 flexible array member
 * instead of the GNU zero-length-array extension; sizeof(GpuMsg) and the
 * memory layout are unchanged. */
typedef struct _GpuMsg{
    int size;               /* presumably the byte length of data[] — confirm against users */
    int cmd;                /* command code */
    unsigned char data[];   /* payload (flexible array member) */
}GpuMsg;

/* Notification sent to the guest about a G-channel connection change.
 * (Name kept as-is — "Nofity"/"Gchanel" typos are part of the existing
 * interface.) */
typedef struct _NofityGuestGchanelState{
    int monitor_id;
    int state;          // 0 disconnected, 1 connected
}NofityGuestGchanelState;

/* Per-monitor resolution description carried inside ResolveChangesEx.
 * Consumed by gpu_ext_handle_resolve(). */
typedef struct _resolve_info{
    uint32_t mon_id;      /* monitor index, must be < GPU_MAX_MONITORS */
    uint32_t mon_pitch;   /* scanline pitch */
    uint32_t mon_width;   /* width in pixels */
    uint32_t mon_height;  /* height in pixels */
}resolve_info;

/* GPU_CCOMMAND_RESOLVE message: new resolution set for up to 4 monitors
 * plus the requested stream mode.  Wire format: do not reorder fields. */
typedef struct _ResolveChangesEx {
    int index;   // set 0xFFFA
    int count;   // number of valid entries in res[] (capacity is 4)
    int mon_id;  // must set 0
    GpuModeType mode;    /* requested operating mode (SYS/STREAM/...) */
    resolve_info res[4]; /* per-monitor resolution entries */
}ResolveChangesEx;

/* Minimal rect-update message (single BLOCKSIZE x BLOCKSIZE tile).  Its
 * sizeof() is used as the minimum acceptable message length in
 * gpu_ext_handle_rect3(). */
typedef struct _RectChanges {
    int index;      /* command index word */
    int resver;     /* reserved */
    int mon_id;     /* target monitor id */
    int resver2;    /* reserved */
    unsigned char data[BLOCKSIZE * BLOCKSIZE * COLORDEPTH];
}RectChanges;

/* Full-screen-capacity message; used as the generic decoded-message view
 * in gpu_ext_handle_diff() (only index/mon_id/resver are read there).
 * For GPU_CCOMMAND_STREAM frames the resver field carries the payload
 * length (see gpu_ext_handle_stream_frame()). */
typedef struct _RectChangesEx {
    int index;      /* command index word (GPU_CCOMMAND_*) */
    int resver;     /* reserved; payload length for stream frames */
    int mon_id;     /* target monitor id */
    int resver2;    /* reserved */
    unsigned char data[SCREENWIDTH * SCREENHEIGHT * COLORDEPTH];
}RectChangesEx;

/* One rectangle update record; several records are packed back-to-back in
 * a rect batch (see gpu_ext_handle_rect3(), which strides by
 * sizeof(RectChangesEx3) + dataLen).  Uses a C99 flexible array member
 * instead of the GNU zero-length-array extension; sizeof() and layout are
 * unchanged. */
typedef struct _RectChangesEx3 {
    int index;      /* command index word */
    int dataLen;    /* byte length of data[] */
    int mon_id;     /* target monitor id */
    uint16_t left, top, right, bottom;  /* dirty rectangle bounds */
    unsigned char data[];               /* pixel payload (flexible array member) */
}RectChangesEx3;

/* Cursor update message (GPU_CCOMMAND_CURSOR / GCHANNEL_CURSOR).
 * NOTE(review): data[] is sized as a full-screen buffer minus 8 ints, but
 * the header above it holds 10 ints — confirm the intended total message
 * size against the guest-side sender. */
typedef struct _CursorChangesEx {
    int index;      /* command index word */
    int data_len;   /* bytes of cursor shape actually present in data[] */
    int mon_id;     /* target monitor id */
    int resver2;    /* reserved */
    int width;      /* cursor width in pixels */
    int height;     /* cursor height in pixels */
    int hide;       /* 1 = hide cursor, 0 = show/set shape */
    int type;       /* 1 = mono, otherwise alpha (see gpu_ext_handle_gchanel_cursor()) */
    int x;          /* hot-spot x */
    int y;          /* hot-spot y */
    unsigned char data[SCREENWIDTH * SCREENHEIGHT * COLORDEPTH - 8*sizeof(int)];
}CursorChangesEx;
//#define GPU_RINGBUFFER
#ifdef GPU_RINGBUFFER
/* Ring-buffer framing state (currently compiled out). */
typedef struct _gpunvidia_data_ring
{
    ringbuffer_t *rb;   /* backing ring buffer */
    int maxlen;         /* length of the message currently being assembled */
}gpunvidia_data_ring;
#else
/* Length prefix preceding each framed device message. */
typedef struct GpuDevHeader {
    uint32_t size;      /* payload size in bytes, excluding this header */
} GpuDevHeader;

/* Incremental reader state, overlaid on g_frame_buffer (see
 * gpu_reader_init()).
 * NOTE(review): sizeof(GpuReader) = 12 + (GPU_MAX_WIDTH*GPU_MAX_HEIGHT*8
 * - 8) bytes, i.e. 4 bytes MORE than g_frame_buffer holds — confirm the
 * hdr_data sizing (should it subtract 12, not 8?). */
typedef struct GpuReader{
    GpuDevHeader hdr;   /* header of the message in flight */
    uint32_t hdr_pos;   /* bytes consumed so far, including the header */
    uint32_t handlled;  /* bytes of the current payload already processed */
    uint8_t hdr_data[GPU_MAX_WIDTH*GPU_MAX_HEIGHT*8 - 8];
}GpuReader;
#endif

/* Global device extension state, allocated once by gpu_ext_init(). */
gpu_ext *g_gpu_ext = NULL;
#ifdef GPU_RINGBUFFER
static gpunvidia_data_ring g_gpunvidia_data_ring;
#else
/* Overlays g_frame_buffer; see gpu_reader_init(). */
static GpuReader *g_reader = NULL;
#endif
/* Notifications back toward the guest/application side (defined below). */
static void gpu_client_connected(int connected);
static void gpu_client_finish_resolve(void);
static void gpu_client_drop_stream(void);
static void gpu_client_sys_ack(int mon_id, uint32_t nums);
static void gpu_client_performance_test(uint8_t *buffer,
    int compress_size, float compress_cost, float uncompress_cost);
static void gpu_client_stream_ready(int mon_id);
/* Message handlers and per-monitor channel plumbing (defined below). */
static void gpu_ext_handle_rect3(uint8_t *buffer, int len);
static void gpu_ext_handle_stream_frame(void *buffer);
static void gpunvidia_device_stream_data(RedCharDevice *self, const void *data, size_t size, uint32_t mm_time, int monitor_id);
static void gpunvidia_device_create_channel(RedCharDevice *self, int monitor_id);
static void gpunvidia_device_cursor_cmd(RedCharDevice *self, RedCursorCmd *cmd, int monitor_id);
static void gpunvidia_device_change_format(RedCharDevice *dev, const GchannelFormat *format);

/* Scratch buffer for reassembling framed device messages. */
static uint8_t g_frame_buffer[GPU_MAX_WIDTH*GPU_MAX_HEIGHT*8];

/* Release the cached mixture "mark" bitmap for one monitor and reset its
 * length.  Safe to call when no mark data is present. */
static inline void gpu_mixture_reset(gpu_monitor_ext *ext)
{
    /* free(NULL) is a no-op, so the previous NULL guard was redundant. */
    free(ext->g_mix_mark_data);
    ext->g_mix_mark_data = NULL;
    ext->g_mix_mark_data_len = 0;
}

/* Wake a single QXL instance; tolerates a NULL handle. */
static inline void gpu_qxl_wakeup(QXLInstance *qxl)
{
    if (qxl == NULL) {
        return;
    }
    spice_qxl_wakeup(qxl);
}

static void gpu_qxl_wakeup_all(void)
{
    for(int i=0; i<GPU_MAX_MONITORS; i++){
        gpu_qxl_wakeup(reds_gpu_get_qxl(i));
    }
}

/* Zero the per-monitor image allocation/free statistics counters. */
static inline void gpu_ext_statistic_reset(gpu_monitor_ext *ext)
{
    spice_warning("### %d monitor ext statistic reset.", ext->g_mon_id);

    ext->m_statistic_image_item_malloc = 0;
    ext->m_statistic_image_item_free = 0;
    ext->m_statistic_image_bitmap_malloc = 0;
    ext->m_statistic_image_bitmap_free = 0;
    ext->m_statistic_image_queue_free = 0;
}

/* Dump the per-monitor malloc/free statistics overview.
 * Currently DISABLED: the unconditional return below makes the
 * spice_warning() unreachable.  Remove the return to re-enable. */
static inline void gpu_ext_statistic_print(gpu_monitor_ext *m, gboolean flush)
{
    return ;
    spice_warning("### %d monitor %s flush statistic overview"
        "image_item (m:%d,f:%d), bitmap(m:%d,f:%d), queue:%d",
        m->g_mon_id,
        flush ? "after" :"before",
        m->m_statistic_image_item_malloc,
        m->m_statistic_image_item_free,
        m->m_statistic_image_bitmap_malloc,
        m->m_statistic_image_bitmap_free,
        m->m_statistic_image_queue_free);
}

/* Reset the statistics counters on every monitor. */
static void gpu_ext_statistic_reset_all()
{
    for (int mon = 0; mon < GPU_MAX_MONITORS; mon++) {
        gpu_ext_statistic_reset(reds_gpu_get_ext(mon));
    }
}

/* One-time allocation and initialisation of the global gpu_ext singleton
 * and its per-monitor state (locks, queues, primary surfaces), plus the
 * LZO library.  Idempotent: a second call logs a warning and returns.
 * NOTE(review): malloc/calloc results are not checked; a failed allocation
 * here would crash on first use. */
static void gpu_ext_init(gpu_ext **e)
{
    gpu_ext *ext=NULL;
    gpu_monitor_ext *mon_ext = NULL;

    if(*e){
        spice_warning("gpu ext has init.");
        return ;
    }

    // TODO, memory leak, isn't must be free
    *e = malloc(sizeof(gpu_ext));
    ext = *e;

    /* Global (cross-monitor) state. */
    ext->g_trigger = 0;
    ext->m_resolve_state = GPU_RESOLVE_INIT;
    ext->uid = 0U;
    ext->g_start_stream = 0;
    ext->m_charDev = NULL;
    ext->channel_is_connected = NULL;
    ext->m_main_disconnected = GPU_MAIN_CHANNEL_CONNECTED;
    ext->m_mode_type = GPU_MODE_SYS;
    ext->g_stream_drop_flags = 0;
    ext->m_is_gpu_stream_mode = 0;
    ext->m_overstock = 0;
    ext->m_prep_stream_state = GPU_PREP_STREAM_INIT;
    /* Shared decompression scratch buffer; its capacity is the clamp used
     * when decoding messages in gpu_ext_handle_diff(). */
    ext->g_uncompress_data = (uint8_t *)malloc(GPU_MAX_WIDTH * GPU_MAX_HEIGHT * COLORDEPTH * 2);
    for(int i=0; i<GPU_MAX_MONITORS; i++){
        ext->g_mon_ext[i] = (gpu_monitor_ext *)calloc(1, sizeof(gpu_monitor_ext));
        mon_ext = ext->g_mon_ext[i];

        /* g_lock guards g_images, g_stream_lock guards g_streams. */
        pthread_mutex_init(&mon_ext->g_lock, NULL);
        pthread_mutex_init(&mon_ext->g_stream_lock, NULL);
        memset(&mon_ext->g_cursorData, 0, sizeof(mon_ext->g_cursorData));
        memset(&mon_ext->g_mix, 0, sizeof(mon_ext->g_mix));

        mon_ext->g_mon_id = i;
        mon_ext->g_qxl = NULL;
        mon_ext->g_height = 0;
        mon_ext->g_width = 0;
        mon_ext->g_pitch = 0;
        mon_ext->g_show_cursor = 0;
        mon_ext->g_last_frame_mm_time = 0u;
        mon_ext->g_stream_item_couter = 0;
        mon_ext->g_stream_created_couter = 0;
        mon_ext->g_release = 0;
        mon_ext->g_mix_mark_data = NULL;
        mon_ext->g_mix_mark_data_len = 0;
        mon_ext->m_sys_frames = 0u;
        mon_ext->g_stream_frames = 0;
        mon_ext->is_first_frame = 1;
        mon_ext->m_debug_local_frame_time = 0u;
        mon_ext->m_stream_state = GPU_STREAM_INIT;
        mon_ext->m_gpunvidia_channel = NULL;
        mon_ext->m_gpunvidia_cursor_channel = NULL;
        /* Zeroed full-screen framebuffer for this monitor. */
        mon_ext->g_primary_surface = (uint8_t *)calloc(1, GPU_MAX_HEIGHT * GPU_MAX_WIDTH * 4);
        gpu_ext_statistic_reset(mon_ext);

        TAILQ_INIT(&mon_ext->g_streams);
        TAILQ_INIT(&mon_ext->g_images);
    }

    if (lzo_init() != LZO_E_OK) {
        spice_warning("internal error. lzo init faild.");
    }
}

/* Apply a GPU_CCOMMAND_CURSOR update to the per-monitor cursor cache and
 * wake all QXL instances so the change gets rendered. */
static void gpu_ext_handle_cursor(void *buffer)
{
    CursorChangesEx *cursorEx = (CursorChangesEx*)buffer;
    cursorData *cur;
    gpu_monitor_ext *mon_ext = reds_gpu_get_ext(cursorEx->mon_id);

    /* No QXL instance bound to this monitor yet: nothing to update. */
    if(!mon_ext->g_qxl)
        return ;

    cur = &mon_ext->g_cursorData;
    if(cursorEx->hide == 1){
        /* Hide request: only the visibility flag changes; the cached
         * shape is kept. */
        cur->hide = cursorEx->hide;
    }else{
        cur->len = cursorEx->data_len;
        cur->w = cursorEx->width;
        cur->h = cursorEx->height;
        cur->type = cursorEx->type;
        cur->hide = cursorEx->hide;
        cur->x = cursorEx->x;
        cur->y = cursorEx->y;
        /* NOTE(review): data_len comes from the guest and is copied
         * without bounds-checking against the capacity of cur->data —
         * confirm cursorData.data is sized for the largest message. */
        memcpy(cur->data , cursorEx->data, cursorEx->data_len);
    }

    /* Cleared on every update; presumably the render side re-arms it —
     * TODO confirm intended semantics. */
    mon_ext->g_show_cursor = 0;

    gpu_qxl_wakeup_all();
}

static void gpu_ext_handle_query_client_connected(void)
{
    if(!g_gpu_ext->channel_is_connected){
        return ;
    }

    gpu_client_connected(g_gpu_ext->channel_is_connected(0));
}

/* Drop every queued stream frame on every monitor, releasing the frame
 * payloads. */
static void gpu_ext_flush_stream_item(void)
{
    for (int mon = 0; mon < GPU_MAX_MONITORS; mon++) {
        gpu_monitor_ext *mon_ext = reds_gpu_get_ext(mon);
        struct gpu_stream_item *entry;

        pthread_mutex_lock(&mon_ext->g_stream_lock);
        while ((entry = TAILQ_FIRST(&mon_ext->g_streams)) != NULL) {
            TAILQ_REMOVE(&mon_ext->g_streams, entry, tailq);
            free(entry->data);
            free(entry);
        }
        pthread_mutex_unlock(&mon_ext->g_stream_lock);
    }
}

/* Drop every queued image item on every monitor, releasing the bitmap
 * memory and bumping the free-side statistics counters. */
static void gpu_ext_flush_image_item(void)
{
    for (int mon = 0; mon < GPU_MAX_MONITORS; mon++) {
        gpu_monitor_ext *mon_ext = reds_gpu_get_ext(mon);
        struct gpu_image_item *entry;

        pthread_mutex_lock(&mon_ext->g_lock);
        while ((entry = TAILQ_FIRST(&mon_ext->g_images)) != NULL) {
            TAILQ_REMOVE(&mon_ext->g_images, entry, tailq);
            free(entry->data.bitmap);
            free(entry);
            mon_ext->m_statistic_image_bitmap_free++;
            mon_ext->m_statistic_image_item_free++;
            mon_ext->m_statistic_image_queue_free++;
        }
        pthread_mutex_unlock(&mon_ext->g_lock);
    }
}

/* Handle a GPU_CCOMMAND_RESOLVE message: flush queued frames, record the
 * new per-monitor resolutions, fall back to 1024x768 for any monitor left
 * unset, resync acks, and arm the DESTROY_SURFACE resolve state machine.
 * Fix: res->count is guest-supplied and is now clamped to the capacity of
 * res->res[] (4 entries) to avoid reading past the end of the message. */
static void gpu_ext_handle_resolve(void *buffer)
{
    ResolveChangesEx *res = (ResolveChangesEx *)buffer;
    resolve_info *info;
    gpu_monitor_ext *mon_ext;

    if(!g_gpu_ext->channel_is_connected){
        return ;
    }

    // cancel create command
    g_gpu_ext->g_trigger = 0;

    // flush queue
    gpu_ext_flush_stream_item();

    g_gpu_ext->m_is_gpu_stream_mode = (res->mode == GPU_MODE_STREAM) ? 1 : 0;
    spice_warning("stream mode %d", g_gpu_ext->m_is_gpu_stream_mode);

    /* Clamp the guest-supplied entry count to the fixed array capacity. */
    int count = res->count;
    int max_count = (int)(sizeof(res->res) / sizeof(res->res[0]));
    if(count > max_count){
        spice_warning("resolve count %d over capacity, clamped to %d.", count, max_count);
        count = max_count;
    }

    for(int i=0; i<count; i++){
        info = &res->res[i];
        if(info->mon_id >= GPU_MAX_MONITORS){
            spice_warning("error monitor %d id for setting resolve.", info->mon_id);
            continue;
        }

        mon_ext = reds_gpu_get_ext(info->mon_id);
        mon_ext->g_width = info->mon_width;
        mon_ext->g_height = info->mon_height;
        mon_ext->g_pitch =info->mon_pitch;
        spice_warning("setting monitor %d id resolve wxh:p= {%ux%u:%u}",
            mon_ext->g_mon_id, mon_ext->g_width, mon_ext->g_height, mon_ext->g_pitch);
    }

    // check default value
    for(int i=0;i<GPU_MAX_MONITORS;i++){
        mon_ext = reds_gpu_get_ext(i);
        if(mon_ext->g_width == 0 || mon_ext->g_height == 0 || mon_ext->g_pitch == 0){
            spice_warning("setting monitor %d id to default resolve value {1024x768:1024}", mon_ext->g_mon_id);
            mon_ext->g_width = 1024;
            mon_ext->g_height = 768;
            mon_ext->g_pitch = 1024;
        }
    }

    // check default ack
    reds_sync_reset_all();

    g_gpu_ext->m_resolve_state = GPU_RESOLVE_DESTROY_SURFACE;
    g_gpu_ext->g_trigger = 1;

    gpu_client_finish_resolve();
}

/* Build a zeroed 128x128 placeholder image tagged with the given
 * timestamp/type and append it to the monitor's image queue; also resets
 * the monitor's mixture rect to the same 128x128 area.
 * NOTE(review): the static frame counter and malloc results are not
 * guarded — confirm all callers run on one thread. */
static void gpu_ext_create_image(gpu_monitor_ext *mon_ext, uint32_t timestamp, GpuImageType imagetype)
{
    struct gpu_image_item *item;
    static int frames_couter = 0;

    /* Mixture rect mirrors the placeholder geometry. */
    mon_ext->g_mix.left = mon_ext->g_mix.top = 0;
    mon_ext->g_mix.right = mon_ext->g_mix.bottom = 128;
    mon_ext->g_mix.dataLen = 128 * 128 *4;

    item = (struct gpu_image_item *)malloc(sizeof(struct gpu_image_item));
    item->data.top = 0;
    item->data.left = 0;
    item->data.bottom = 128;
    item->data.right = 128;
    item->data.len = 128 * 128 *4;
    item->data.bitmap = (uint8_t *)malloc(item->data.len);
    memset(item->data.bitmap, 0x00, item->data.len);
    item->data.mon_id = mon_ext->g_mon_id;
    item->mon_id = mon_ext->g_mon_id;
    item->type = GPU_GROUP_TYPE_STREAM;
    item->frame_mm_time = timestamp;
    item->last_rect = 1u;
    item->frames = ++frames_couter;
    item->image_type = imagetype;
    reds_perf_send(item->frames, item->frame_mm_time, SERVER_PERF_RECIVER_DATA);
    //spice_warning("tframes start %d %llu", frames_couter, spice_get_monotonic_time_ms());

    pthread_mutex_lock(&mon_ext->g_lock);
    TAILQ_INSERT_TAIL(&mon_ext->g_images, item, tailq);
    pthread_mutex_unlock(&mon_ext->g_lock);
}

/* Same as gpu_ext_create_image(), but the placeholder is PREPENDED to the
 * monitor's image queue (used when an image must be recreated ahead of
 * already-queued work). */
static void gpu_ext_create_image_insert_head(gpu_monitor_ext *mon_ext, uint32_t timestamp, GpuImageType imagetype)
{
    struct gpu_image_item *item;
    static int frames_couter = 0;

    /* Mixture rect mirrors the placeholder geometry. */
    mon_ext->g_mix.left = mon_ext->g_mix.top = 0;
    mon_ext->g_mix.right = mon_ext->g_mix.bottom = 128;
    mon_ext->g_mix.dataLen = 128 * 128 *4;

    item = (struct gpu_image_item *)malloc(sizeof(struct gpu_image_item));
    item->data.top = 0;
    item->data.left = 0;
    item->data.bottom = 128;
    item->data.right = 128;
    item->data.len = 128 * 128 *4;
    item->data.bitmap = (uint8_t *)malloc(item->data.len);
    memset(item->data.bitmap, 0x00, item->data.len);
    item->data.mon_id = mon_ext->g_mon_id;
    item->mon_id = mon_ext->g_mon_id;
    item->type = GPU_GROUP_TYPE_STREAM;
    item->frame_mm_time = timestamp;
    item->last_rect = 1u;
    item->frames = ++frames_couter;
    item->image_type = imagetype;
    reds_perf_send(item->frames, item->frame_mm_time, SERVER_PERF_RECIVER_DATA);
    spice_warning("%d desktop recreate image %d", mon_ext->g_mon_id, timestamp);

    pthread_mutex_lock(&mon_ext->g_lock);
    TAILQ_INSERT_HEAD(&mon_ext->g_images, item, tailq);
    pthread_mutex_unlock(&mon_ext->g_lock);
}

/* Drain stale entries left on the monitor's stream queue at the start of
 * a new stream run, logging how many were discarded. */
static void gpu_ext_check_stream_queue(gpu_monitor_ext *mon_ext)
{
    struct gpu_stream_item *entry;
    int dropped = 0;

    pthread_mutex_lock(&mon_ext->g_stream_lock);
    while ((entry = TAILQ_FIRST(&mon_ext->g_streams)) != NULL) {
        TAILQ_REMOVE(&mon_ext->g_streams, entry, tailq);
        free(entry->data);
        free(entry);
        dropped++;
    }
    pthread_mutex_unlock(&mon_ext->g_stream_lock);

    if (dropped > 0) {
        spice_warning("00 desktop %d stream begin time, but the queue has %d length, dropped!",
            mon_ext->g_mon_id, dropped);
    }
}

/* Count the entries on a monitor's stream and image queues and log an
 * overview.  Returns the number of queued stream items.
 * Cleanup: removed two dead TAILQ_FIRST() stores whose only consumers
 * were commented-out log lines, and fixed the "lenght" typo in the log. */
static int gpu_statistics_stream_queue(gpu_monitor_ext *mon_ext)
{
    struct gpu_stream_item *item;
    struct gpu_image_item *img_item;
    int cnt = 0, img_cnt = 0;

    /* Count pending stream frames under the stream lock. */
    pthread_mutex_lock(&mon_ext->g_stream_lock);
    TAILQ_FOREACH(item, &mon_ext->g_streams, tailq){
        cnt++;
    }
    pthread_mutex_unlock(&mon_ext->g_stream_lock);

    /* Count pending image items under the image lock. */
    pthread_mutex_lock(&mon_ext->g_lock);
    TAILQ_FOREACH(img_item, &mon_ext->g_images, tailq){
        img_cnt++;
    }
    pthread_mutex_unlock(&mon_ext->g_lock);

    spice_warning("%d desktop stream queue length:%d, real queue:%d, image queue:%d",
        mon_ext->g_mon_id, mon_ext->g_stream_frames, cnt, img_cnt);
    return cnt;
}

/* Peek the mm-time of the oldest queued stream frame for a monitor.
 * Returns 0 when the queue is empty. */
uint32_t gpu_get_stream_first_timestamp(gpu_monitor_ext *mon_ext)
{
    uint32_t first_time = 0;

    pthread_mutex_lock(&mon_ext->g_stream_lock);
    struct gpu_stream_item *head = TAILQ_FIRST(&mon_ext->g_streams);
    if (head != NULL) {
        first_time = head->frame_mm_time;
        spice_warning("%d desktop stream queue recreater first timestamp:%u", mon_ext->g_mon_id, first_time);
    }
    pthread_mutex_unlock(&mon_ext->g_stream_lock);

    return first_time;
}

/* Handle a GPU_CCOMMAND_STREAM frame: queue the encoded payload on the
 * monitor's stream list (and, in GSTREAM mode, a matching placeholder
 * image), then wake all QXL instances. */
static void gpu_ext_handle_stream_frame(void *buffer)
{
    struct gpu_stream_item *item;
    RectChangesEx *prectEx = (RectChangesEx *)buffer;
    gpu_monitor_ext *mon_ext;
    mon_ext = reds_gpu_get_ext(prectEx->mon_id);

    /* Local frame pacing: the first frame of a run is stamped "now"
     * (after purging any stale queue); each following frame is stamped
     * one 30fps tick ahead. */
    uint32_t curtime = 0u;
    if(mon_ext->m_debug_local_frame_time == 0u){
        gpu_ext_check_stream_queue(mon_ext);
        curtime = reds_get_mm_time();
        spice_warning("curtime first assign %u", curtime);
        mon_ext->g_stream_frames = 0;
    }else{
        curtime = reds_get_mm_time() + 1000 / 30;
    }
    mon_ext->m_debug_local_frame_time = curtime;

    // check memory overload
    /* ~40 seconds of 30fps frames queued: flag overstock (consumed in
     * gpu_ext_handle_diff(), which forces a client reconnect) instead of
     * growing the queue without bound. */
    if(mon_ext->g_stream_frames >= 30 * 40){
        if(gpu_statistics_stream_queue(mon_ext) >= 30 * 40){

            g_gpu_ext->m_overstock = 1;
            g_gpu_ext->g_trigger = 1;
            g_gpu_ext->g_start_stream = 1;

            gpu_qxl_wakeup_all();

            return ;
        }
    }

    /* For stream frames the resver header field carries the payload
     * length.  NOTE(review): malloc results are unchecked — TODO confirm
     * OOM policy. */
    item = (struct gpu_stream_item *)malloc(sizeof(struct gpu_stream_item));
    item->size = prectEx->resver;
    item->data = (uint8_t *)malloc(item->size);
    memcpy(item->data, prectEx->data, item->size);
    item->frame_mm_time = curtime;
    item->mon_id = prectEx->mon_id;
    mon_ext->g_stream_frames++;

    pthread_mutex_lock(&mon_ext->g_stream_lock);
    TAILQ_INSERT_TAIL(&mon_ext->g_streams, item, tailq);
    pthread_mutex_unlock(&mon_ext->g_stream_lock);

    if(g_gpu_ext->m_mode_type == GPU_MODE_GSTREAM){
        gpu_ext_create_image(mon_ext, item->frame_mm_time, GPU_IMAGE_TYPE_FRAME);
    }

    g_gpu_ext->g_trigger = 1;
    g_gpu_ext->g_start_stream = 1;

    gpu_qxl_wakeup_all();
}

/* Handle a GPU_CCOMMAND_HEART message.  When the monitor's surface has
 * just become ready, notify the application and rearm the state machine;
 * otherwise (GSTREAM mode) enqueue a heartbeat placeholder image and wake
 * the QXL instances. */
static void gpu_ext_handle_stream_create(uint8_t *buffer)
{
    const RectChangesEx *rect = (const RectChangesEx *)buffer;
    gpu_monitor_ext *mon_ext = reds_gpu_get_ext(rect->mon_id);

    if (mon_ext->m_stream_state == GPU_STREAM_READY) {
        mon_ext->m_stream_state = GPU_STREAM_INIT;
        gpu_client_stream_ready(rect->mon_id);
        spice_warning("notify application %d monitor surface ready.", rect->mon_id);
        return;
    }

    uint32_t frame_mm_time = reds_get_mm_time()/* - 100*/;
    if (g_gpu_ext->m_mode_type == GPU_MODE_GSTREAM) {
        gpu_ext_create_image(mon_ext, frame_mm_time, GPU_IMAGE_TYPE_HEART);
    }

    g_gpu_ext->g_start_stream = 1;
    g_gpu_ext->g_trigger = 1;

    gpu_qxl_wakeup_all();
}

/* Handle a GPU_CCOMMAND_GPU_MODE message: record the new operating mode
 * and run the mode-specific transition. */
static void gpu_ext_handle_gpu_mode(void *buffer)
{
    RectChangeGpuModeEx *msg = (RectChangeGpuModeEx *)buffer;

    g_gpu_ext->m_mode_type = msg->mode;
    spice_warning("set gpu mode %d", g_gpu_ext->m_mode_type);

    switch (msg->mode) {
    case GPU_MODE_SYS:
        /* Leaving stream mode: stop streaming if it was running. */
        if (g_gpu_ext->g_start_stream == 1) {
            g_gpu_ext->g_start_stream = 0;
        }
        break;
    case GPU_MODE_GSTREAM:
        /* Arm the prep-stream drop state and force every monitor back to
         * the stream-create state. */
        g_gpu_ext->m_prep_stream_state = GPU_PREP_STREAM_DROP;
        for (int i = 0; i < GPU_MAX_MONITORS; i++) {
            reds_gpu_get_ext(i)->m_stream_state = GPU_STREAM_CREATE;
        }
        break;
    case GPU_MODE_STREAM:
        /* Tell the application every monitor is ready to stream. */
        for (int i = 0; i < GPU_MAX_MONITORS; i++) {
            gpu_client_stream_ready(i);
        }
        break;
    default:
        break;
    }
}

/* Performance test over the sys (rect) path: report the measured figures
 * to the guest, then process the payload that follows the PerfReqInfo
 * header as a normal rect batch. */
static void gpu_ext_handle_performance_caps(uint8_t *buffer, int len, int compress_size, 
    float compress_cost, float uncompress_cost)
{
    uint8_t *payload = buffer + sizeof(PerfReqInfo);
    int payload_len = len - (int)sizeof(PerfReqInfo);

    gpu_client_performance_test(buffer, compress_size, compress_cost, uncompress_cost);
    gpu_ext_handle_rect3(payload, payload_len);
}

/* Performance test over the stream path: report the measured figures to
 * the guest, then process the payload that follows the PerfReqInfo header
 * as a normal stream frame. */
static void gpu_ext_handle_performance_stream_caps(uint8_t *buffer, int len, int compress_size,
    float compress_cost, float uncompress_cost)
{
    uint8_t *payload = buffer + sizeof(PerfReqInfo);

    gpu_client_performance_test(buffer, compress_size, compress_cost, uncompress_cost);
    gpu_ext_handle_stream_frame(payload);
}

/* Handle a GPU_CCOMMAND_MIXSTREAM message (START/STOP/MARK of a mixture
 * region), staging any created image items and queueing them in one pass
 * under the image lock, then waking this monitor's QXL.
 * Fix: the item count derived from guest-supplied mix->count is now
 * clamped to the capacity of the fixed items[] staging array (4096). */
static void gpu_ext_handle_mixture_stream(void *buffer)
{
    int item_cnt = 0;
    struct gpu_image_item *item, *items[4096];
    RectChangeMixtureEx *mix = (RectChangeMixtureEx *)buffer;
    if(SPICE_UNLIKELY(mix->mon_id > 3)){
        spice_warning("mixture montior id over range id:%d.", mix->mon_id);
        return ;
    }
    gpu_monitor_ext *mon_ext = reds_gpu_get_ext(mix->mon_id);
    /* NOTE(review): MAX() enforces a LOWER bound of GPU_MIXTURE_MIN items;
     * confirm that clamping mix->count upward (not downward) is intended. */
    int min_count = MAX(mix->count, GPU_MIXTURE_MIN);
    /* Guest-controlled count must never overflow the staging array. */
    if(min_count > (int)(sizeof(items) / sizeof(items[0]))){
        spice_warning("mixture count %d over capacity, clamped to %d.",
            min_count, (int)(sizeof(items) / sizeof(items[0])));
        min_count = (int)(sizeof(items) / sizeof(items[0]));
    }

    if(mix->type == GPU_MIXTURE_START){
        g_gpu_ext->g_start_stream = 1;
        memcpy(&mon_ext->g_mix, mix, sizeof(RectChangeMixtureEx));

        for(int i=0; i<min_count; i++){
            item = (struct gpu_image_item *)malloc(sizeof(struct gpu_image_item));

            item->data.top = mix->top;
            item->data.left = mix->left;
            item->data.bottom = MIN(mix->bottom, mon_ext->g_height );
            item->data.right = MIN(mix->right, mon_ext->g_width);
            item->data.len = mix->dataLen;
            item->data.bitmap = (uint8_t *)malloc(mix->dataLen);
            /* Seed the bitmap from the cached mark data when present. */
            if(mon_ext->g_mix_mark_data == NULL){
                memset(item->data.bitmap, 0x00, mix->dataLen);
            }else{
                memcpy(item->data.bitmap, mon_ext->g_mix_mark_data,
                    mix->dataLen > mon_ext->g_mix_mark_data_len ? mon_ext->g_mix_mark_data_len: mix->dataLen);
            }
            item->data.mon_id = mix->mon_id;
            item->mon_id = mix->mon_id;
            item->type = GPU_GROUP_TYPE_STREAM;
            items[item_cnt] = item;
            item_cnt++;
        }
    }else if(mix->type == GPU_MIXTURE_STOP){
        /* Tear down: clear the mixture rect, stop streaming, drop caches. */
        memset(&mon_ext->g_mix, 0, sizeof(RectChangeMixtureEx));
        g_gpu_ext->g_start_stream = 0;
        gpu_mixture_reset(mon_ext);
        gpu_ext_flush_stream_item();
    } else if(mix->type == GPU_MIXTURE_MARK){
        /* Replace the cached mark bitmap used to seed future items. */
        g_gpu_ext->g_start_stream = 1;
        memcpy(&mon_ext->g_mix, mix, sizeof(RectChangeMixtureEx));
        gpu_mixture_reset(mon_ext);
        mon_ext->g_mix_mark_data = (uint8_t *)malloc(mix->dataLen);
        mon_ext->g_mix_mark_data_len = mix->dataLen;
        memcpy(mon_ext->g_mix_mark_data, mix->data, mix->dataLen);
    }

    pthread_mutex_lock(&mon_ext->g_lock);
    for(int i=0; i<item_cnt; i++){
        TAILQ_INSERT_TAIL(&mon_ext->g_images, items[i], tailq);
    }
    pthread_mutex_unlock(&mon_ext->g_lock);

    g_gpu_ext->g_trigger = 1;

    if(mon_ext->g_qxl){
        spice_qxl_wakeup(mon_ext->g_qxl);
    }
}

/* Handle a batch of rect updates: RectChangesEx3 records packed
 * back-to-back in buffer.  Each valid rect becomes a gpu_image_item; the
 * batch is appended to the monitor's image queue in one pass under the
 * image lock, the final rect is tagged last_rect with a frame number, and
 * the monitor's QXL is woken.
 * Fixes: (1) the minimum-length check now casts sizeof() to int so a
 * negative len cannot pass the signed/unsigned comparison; (2) the fixed
 * items[] staging array is bounds-checked so a corrupt batch with more
 * than 4096 rects cannot overflow it. */
static void gpu_ext_handle_rect3(uint8_t *buffer, int len)
{
    RectChangesEx3 *p;
    int item_cnt = 0, mon_id;
    struct gpu_image_item *item, *items[4096];
    gpu_monitor_ext *mon_ext;
    uint32_t *last_rect = NULL, *frame = NULL;
    static int frames_couter = 0;

    // ignore check message
    if(SPICE_UNLIKELY(len < (int)sizeof(RectChanges)))
        return ;

    p = (RectChangesEx3 *)buffer;
    mon_id = p->mon_id;
    mon_ext = reds_gpu_get_ext(mon_id);

    for(int i=0; i<len;){
        p = (RectChangesEx3 *)(buffer + i);
        /* Skip degenerate rectangles but keep walking the batch. */
        if(p->bottom - p->top <= 0 || p->right - p->left <= 0){
            i = i + p->dataLen + sizeof(RectChangesEx3);
            spice_warning("handle rect3 valid data. rect:{%d,%d,%d,%d}", p->left, p->top, p->right,p->bottom);
            continue;
        }
        /* Never overflow the fixed staging array. */
        if(item_cnt >= (int)(sizeof(items) / sizeof(items[0]))){
            spice_warning("handle rect3 over %d rects, rest dropped.", item_cnt);
            break;
        }
        item = (struct gpu_image_item *)malloc(sizeof(struct gpu_image_item));

        mon_ext->m_statistic_image_item_malloc++;
        item->data.top = p->top;
        item->data.left = p->left;
        /* Clip against the monitor's current resolution. */
        item->data.bottom = MIN(p->bottom, mon_ext->g_height );
        item->data.right = MIN(p->right, mon_ext->g_width);
        item->data.len = p->dataLen;
        item->data.bitmap = (uint8_t *)malloc(p->dataLen);
        mon_ext->m_statistic_image_bitmap_malloc++;
        item->data.mon_id = p->mon_id;
        /* A rect matching the mixture region is grouped as stream data. */
        if(p->left == mon_ext->g_mix.left && p->top == mon_ext->g_mix.top && 
            p->right == mon_ext->g_mix.right && p->bottom == mon_ext->g_mix.bottom ){
            item->type = GPU_GROUP_TYPE_STREAM;
        }else{
            item->type = GPU_GROUP_TYPE_IMAE;
        }
        memcpy(item->data.bitmap, p->data, p->dataLen);
        item->mon_id = p->mon_id;
        item->frame_mm_time = 0u;
        item->last_rect = 0u;
        item->image_type = GPU_IMAGE_TYPE_RECT;
        /* Remember the most recent item so the batch tail can be tagged. */
        last_rect = &item->last_rect;
        frame = &item->frames;
        items[item_cnt] = item;
        item_cnt++;

        g_gpu_ext->g_trigger = 1;
        p = (RectChangesEx3 *)(buffer + i);
        i = i + p->dataLen + sizeof(RectChangesEx3);
    }

    /* Tag the last rect of the batch with a fresh frame number. */
    if(last_rect){
        *last_rect = 1u;
        frames_couter++;
        *frame = frames_couter;
    }

    pthread_mutex_lock(&mon_ext->g_lock);
    for(int i=0; i<item_cnt; i++){
        TAILQ_INSERT_TAIL(&mon_ext->g_images, items[i], tailq);
    }
    pthread_mutex_unlock(&mon_ext->g_lock);

    if(mon_ext->g_qxl){
        spice_qxl_wakeup(mon_ext->g_qxl);
    }
}

/* Handle a guest ack request: atomically fetch-and-clear each monitor's
 * pending sys-frame counter and ack any non-zero count.
 * Fix: the old code followed __sync_lock_test_and_set() (which already
 * returns the old value and stores 0) with __sync_lock_release(), which
 * unconditionally stored 0 AGAIN — any frames counted by another thread
 * between the two operations were silently discarded.  The single atomic
 * exchange is sufficient.  The message payload is unused. */
static void gpu_ext_handle_ack(uint8_t *data, int size)
{
    uint32_t nums;
    gpu_monitor_ext *m;

    (void)data;   /* payload not needed for acks */
    (void)size;

    for(int i=0; i<GPU_MAX_MONITORS; i++){
        m = reds_gpu_get_ext(i);

        /* Atomic exchange: grab the pending count and reset it to 0. */
        nums = __sync_lock_test_and_set(&m->m_sys_frames, 0);

        if(nums > 0){
            gpu_client_sys_ack(m->g_mon_id, nums);
        }
    }
}

/* GCHANNEL_CREATE: instantiate the gpunvidia channel for one monitor. */
static void gpu_ext_handle_gchanel_create(RedCharDevice *dev, void *buffer)
{
    GchannelCreate *msg = (GchannelCreate *)buffer;

    gpunvidia_device_create_channel(dev, msg->monitor_id);
}

/* GCHANNEL_FORMAT: apply a stream format change to the device. */
static void gpu_ext_handle_gchanel_format(RedCharDevice *dev, void *buffer)
{
    GchannelFormat *msg = (GchannelFormat *)buffer;

    gpunvidia_device_change_format(dev, msg);
}

/* GCHANNEL_DATA: forward a raw stream payload to the per-monitor channel,
 * stamped with the current mm-time, then trigger streaming and wake all
 * QXL instances. */
static void gpu_ext_handle_gchanel_data(RedCharDevice *dev, void *buffer)
{
    GchannelData *msg = (GchannelData *)buffer;
    uint32_t now = reds_get_mm_time();

    gpunvidia_device_stream_data(dev, msg->data, msg->datalen, now, msg->monitor_id);

    g_gpu_ext->g_start_stream = 1;
    g_gpu_ext->g_trigger = 1;

    gpu_qxl_wakeup_all();
}

/* GCHANNEL_CURSOR: translate a CursorChangesEx message into a
 * RedCursorCmd (SET or HIDE) and hand it to the per-monitor cursor
 * channel, then wake all QXL instances. */
static void gpu_ext_handle_gchanel_cursor(RedCharDevice *dev, void *buffer)
{
    CursorChangesEx *cursorEx = (CursorChangesEx*)buffer;

    RedCursorCmd *cmd = g_new0(RedCursorCmd, 1);
    cmd->type = cursorEx->hide == 0 ? QXL_CURSOR_SET : QXL_CURSOR_HIDE;
    /* Fixed placement; position presumably updated elsewhere — TODO confirm. */
    cmd->u.set.position.x = 400;
    cmd->u.set.position.y = 400;
    cmd->u.set.visible = cursorEx->hide == 0 ? 1 : 0;
    SpiceCursor *cursor = &cmd->u.set.shape;
    cursor->header.unique = 0;
    if(cursorEx->type == 1){
        cursor->header.type = SPICE_CURSOR_TYPE_MONO;
    }else{
        cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
    }
    cursor->header.width = cursorEx->width;
    cursor->header.height = cursorEx->height;
    cursor->header.hot_spot_x = cursorEx->x;
    cursor->header.hot_spot_y = cursorEx->y;
    cursor->flags = 0;

    /* NOTE(review): data_size is computed as w*h*4 (alpha layout) but the
     * copy uses data_len from the message — confirm these always agree,
     * in particular for MONO cursors.  g_memdup is deprecated in newer
     * GLib in favour of g_memdup2 (guint overflow). */
    cursor->data_size = cursorEx->width * cursorEx->height * 4;
    cursor->data = g_memdup(cursorEx->data, cursorEx->data_len);

    gpunvidia_device_cursor_cmd(dev, cmd, cursorEx->mon_id);

    gpu_qxl_wakeup_all();
}

/* Decode one complete framed message from the guest char device and
 * dispatch it by command index.  LZO message layout:
 *   [uint32 uncompressed length][float compress cost][uint32 reserved]
 *   [LZO1X-compressed payload]
 * Fixes: (1) the guest-supplied length word is now validated against the
 * capacity of g_uncompress_data before decompression — previously a
 * corrupt/hostile header could make lzo1x_decompress_safe() overrun the
 * buffer (its dst_len parameter is taken from that word); (2) messages
 * shorter than the 3-word header are rejected; (3) the error log used a
 * mismatched %lu specifier for a size_t expression. */
static void gpu_ext_handle_diff(RedCharDevice *dev, uint8_t *srcbuffer, int srclen, uint32_t *handled)
{
    RectChangesEx *prectEx;
    unsigned long len = GPU_MAX_WIDTH * GPU_MAX_HEIGHT * COLORDEPTH *2;
    void *buffer = g_gpu_ext->g_uncompress_data;
    int res;
    int compress_size;
    float compress_cost, uncompress_cost;
    red_time_t start, stop;


    if(!g_gpu_ext->channel_is_connected){
        return ;
    }

    /* A pending main-channel disconnect is reported lazily, on the next
     * message, then the flag is rearmed. */
    if(g_gpu_ext->m_main_disconnected == GPU_MAIN_CHANNEL_DISCONNECTED){
        g_gpu_ext->m_main_disconnected = GPU_MAIN_CHANNEL_CONNECTED;
        gpu_client_connected(0);
        return ;
    }

    if(!g_gpu_ext->channel_is_connected(0)){
        gpu_client_connected(0);
        return;
    }

    *handled = 0u;

#if 0
    // TODO, zip uncompress
    res = uncompress(g_gpu_ext->g_uncompress_data, &len, srcbuffer, srclen);
    if(res != Z_OK){
        spice_warning("uncompress data faild. res:%d, srclen:%d, dstlen:%d", res, srclen, len);
        return ;
    }
    //buffer = srcbuffer;
    //len = srclen;
#else
    lzo_uint new_len;
    /* Reject messages too short to carry the 3-word header. */
    if(srclen < (int)(3 * sizeof(uint32_t))){
        spice_warning("message too short (%d bytes), dropped.", srclen);
        return ;
    }
    len = *((uint32_t *)srcbuffer);
    /* The length word is guest-controlled: clamp it to the capacity of
     * g_uncompress_data (allocated in gpu_ext_init()) so the decompressor
     * cannot be told to write past the buffer. */
    if(len > (unsigned long)GPU_MAX_WIDTH * GPU_MAX_HEIGHT * COLORDEPTH * 2){
        spice_warning("uncompressed length %lu over buffer capacity, dropped.", len);
        return ;
    }
    new_len = len;
    compress_cost = *((float *)(srcbuffer + sizeof(uint32_t)));
    compress_size = srclen - 3 * sizeof(uint32_t);
    start = spice_get_monotonic_time_ms();
    res = lzo1x_decompress_safe(srcbuffer + 3 * sizeof(uint32_t), srclen - 3 * sizeof(uint32_t), g_gpu_ext->g_uncompress_data, &new_len, NULL);
    if(res != LZO_E_OK){
        spice_warning("uncompress data faild. res:%d, srclen:%d, dstlen:%llu", res, compress_size, (unsigned long long)new_len);
        return ;
    }
    stop = spice_get_monotonic_time_ms();
    uncompress_cost = (float)(stop - start);
#endif

    /* Overstock flagged by gpu_ext_handle_stream_frame(): force the guest
     * to reconnect instead of queueing further frames. */
    if(g_gpu_ext->m_overstock == 1){
        gpu_client_connected(0);
        g_gpu_ext->m_overstock = 0;
        spice_warning("the streams queue overstock, so notify guest disconnect.");
        return;
    }

    /* Dispatch on the command index word at the head of the decoded
     * message; unknown indices fall through to the rect batch handler. */
    prectEx = (RectChangesEx *)buffer;
    if(prectEx->index == GPU_CCOMMAND_CURSOR && g_gpu_ext->channel_is_connected(1)){
        gpu_ext_handle_cursor(buffer);
    }else if(prectEx->index == GPU_CCOMMAND_FULL_FRAME && g_gpu_ext->channel_is_connected(0)){
        /* departed */
        /*gpu_ext_handle_frame(buffer, len);*/
    }else if(prectEx->index == GPU_CCOMMAND_STREAM){
        if(!g_gpu_ext->channel_is_connected(0)){
            gpu_client_connected(0);
            return;
        }

        if(g_gpu_ext->g_stream_drop_flags & STREAM_DROP_FLAG){
            __sync_and_and_fetch(&g_gpu_ext->g_stream_drop_flags, ~STREAM_DROP_FLAG);
            gpu_client_drop_stream();
        }

        gpu_ext_handle_stream_frame(buffer);
    }
    else if(prectEx->index == GPU_CCOMMAND_CLIENT){
        gpu_ext_handle_query_client_connected();
    }else if(prectEx->index == GPU_CCOMMAND_RESOLVE){
        gpu_ext_handle_resolve(buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_MIXSTREAM){
        if(!g_gpu_ext->channel_is_connected(0)){
            gpu_client_connected(0);
            return;
        }
        gpu_ext_handle_mixture_stream(buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_GPU_MODE){
        gpu_ext_handle_gpu_mode(buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_PERF_SYS){
        gpu_ext_handle_performance_caps(buffer, len, compress_size, compress_cost, uncompress_cost);
    }
    else if (prectEx->index == GPU_CCOMMAND_PERF_STREAM){
        gpu_ext_handle_performance_stream_caps(buffer, len, compress_size, compress_cost, uncompress_cost);
    }
    else if (prectEx->index == GPU_CCOMMAND_HEART) {
        gpu_ext_handle_stream_create(buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_GCHANNEL_CREATE) {
        gpu_ext_handle_gchanel_create(dev, buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_GCHANNEL_FORMAT) {
        gpu_ext_handle_gchanel_format(dev, buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_GCHANNEL_DATA) {
        gpu_ext_handle_gchanel_data(dev, buffer);
    }
    else if (prectEx->index == GPU_CCOMMAND_GCHANNEL_CURSOR) {
        gpu_ext_handle_gchanel_cursor(dev, buffer);
    }
    else{
        if(!g_gpu_ext->channel_is_connected(0)){
            gpu_client_connected(0);
            return;
        }
        gpu_ext_handle_rect3(buffer, len);
    }
}

#ifdef GPU_RINGBUFFER
/* Initialise the ring-buffer framing state (compiled out: GPU_RINGBUFFER
 * is not defined above). */
static void gpu_data_ring(gpunvidia_data_ring *r)
{
    r->rb = ringbuffer_create(GPU_MAX_WIDTH * GPU_MAX_HEIGHT * 8);
    r->maxlen = 0;
}

/* Drain one complete, length-prefixed message from the global data ring.
 *
 * Wire format: a 4-byte int length header followed by that many payload
 * bytes.  The parsed length is cached in g_gpunvidia_data_ring.maxlen so
 * a partially received message survives across calls.
 *
 * Returns 1 when a full message was consumed and dispatched (caller
 * should loop to try for another), 0 when more data is still needed.
 */
static int gpu_handle_data(void)
{
    size_t rbytes;

    if(g_gpunvidia_data_ring.maxlen == 0){
        /* Still waiting for the 4-byte length header. */
        rbytes = ringbuffer_read_space(g_gpunvidia_data_ring.rb);

        if(rbytes < sizeof(int)){
            return 0;
        }

        ringbuffer_read(g_gpunvidia_data_ring.rb, (char *)&g_gpunvidia_data_ring.maxlen, sizeof(int));
    }

    /* NOTE(review): maxlen comes straight off the wire and is not
     * validated against the capacity of g_frame_buffer; also the compare
     * below mixes size_t with int — confirm maxlen can never be negative
     * or oversized. */
    rbytes = ringbuffer_read_space(g_gpunvidia_data_ring.rb);
    if(rbytes < g_gpunvidia_data_ring.maxlen){
        return 0;
    }

    ringbuffer_read(g_gpunvidia_data_ring.rb, g_frame_buffer, g_gpunvidia_data_ring.maxlen);
    gpu_ext_handle_diff(g_frame_buffer, g_gpunvidia_data_ring.maxlen);

    /* Message fully handled; next call parses a fresh header. */
    g_gpunvidia_data_ring.maxlen = 0;
    return 1;
}
#else
static void gpu_reader_init(void)
{
    g_reader = (GpuReader *)g_frame_buffer;
    g_reader->hdr_pos = 0u;
    g_reader->hdr.size = 0u;
    g_reader->handlled = 0u;
}

static void gpu_reader_reset(void)
{
    g_reader->hdr.size = 0u;
    g_reader->hdr_pos = 0u;
    g_reader->handlled = 0u;
}

/* Return every monitor to its initial streaming state, flush all pending
 * image/stream items, and dump statistics before and after the flush. */
static void gpu_mon_reset(void)
{
    int i;

    for (i = 0; i < GPU_MAX_MONITORS; i++) {
        gpu_monitor_ext *mon = reds_gpu_get_ext(i);

        mon->g_stream_item_couter = 0;
        mon->g_stream_created_couter = 0;
        mon->g_release = 1;
        mon->is_first_frame = 1;
        mon->m_stream_state = GPU_STREAM_INIT;
        gpu_ext_statistic_print(mon, FALSE);
        gpu_qxl_wakeup(reds_gpu_get_qxl(i));
    }

    gpu_ext_flush_image_item();
    gpu_ext_flush_stream_item();

    for (i = 0; i < GPU_MAX_MONITORS; i++) {
        gpu_monitor_ext *mon = reds_gpu_get_ext(i);

        mon->m_debug_local_frame_time = 0u;
        gpu_ext_statistic_print(mon, TRUE);
    }
}

/* Read the payload of the current message from the character device into
 * the reader's data area, resuming from any previous partial read.
 *
 * dev->hdr must already be fully populated; dev->hdr_pos tracks total
 * bytes consumed so far (header + payload).
 *
 * Returns true once the whole payload has arrived, false if the device
 * ran dry mid-message (call again when more data is available).
 */
static bool gpu_handle_msg_data(SpiceCharDeviceInstance *sin)
{
    /* Largest slice requested from the device per read() call
     * (was a repeated 64*1024+32 magic number). */
    const uint32_t max_chunk = 64 * 1024 + 32;
    SpiceCharDeviceInterface *sif = spice_char_device_get_interface(sin);
    GpuReader *dev = g_reader;
    int n;
    uint32_t pos;
    uint32_t remaining;

    while (dev->hdr_pos != (dev->hdr.size + sizeof(dev->hdr))) {
        pos = dev->hdr_pos - sizeof(dev->hdr);
        remaining = dev->hdr.size - pos;
        n = sif->read(sin, dev->hdr_data + pos,
                      remaining > max_chunk ? max_chunk : remaining);
        if (n <= 0) {
            break;
        }

        dev->hdr_pos += n;
    }

    return (dev->hdr.size + sizeof(dev->hdr)) == dev->hdr_pos;
}

#endif

/* Send an ACK_CLIENT message to the guest agent carrying the connection
 * state as a decimal string; connected == 0 also stops the stream flag. */
static void gpu_client_connected(int connected)
{
    const int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev = g_gpu_ext->m_charDev;
    RedCharDeviceWriteBuffer *buf;
    GpuMsg *msg;

    spice_return_if_fail(char_dev != NULL);

    if (connected == 0) {
        g_gpu_ext->g_start_stream = 0;
    }

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    spice_return_if_fail(buf != NULL);
    buf->buf_used = total_size;

    msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_ACK_CLIENT;
    sprintf((char *)msg->data, "%d", connected);

    red_char_device_write_buffer_add(char_dev, buf);
}

/* Notify the guest agent that the resolution change has been applied
 * (GPU_COMMAND_ACK_RESOLVE). */
static void gpu_client_finish_resolve(void)
{
    int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev;
    RedCharDeviceWriteBuffer *buf;

    char_dev = g_gpu_ext->m_charDev;
    /* Guard against a vanished char device / failed buffer allocation
     * (consistent with gpu_client_connected()). */
    spice_return_if_fail(char_dev != NULL);

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    spice_return_if_fail(buf != NULL);
    buf->buf_used = total_size;

    GpuMsg *msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_ACK_RESOLVE;
    sprintf((char *)msg->data, "resolve finish");

    red_char_device_write_buffer_add(char_dev, buf);
}

/* Ask the guest agent to drop the current stream
 * (GPU_COMMAND_DROP_STREAM). */
static void gpu_client_drop_stream(void)
{
    int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev;
    RedCharDeviceWriteBuffer *buf;

    char_dev = g_gpu_ext->m_charDev;
    /* Guard against a vanished char device / failed buffer allocation
     * (consistent with gpu_client_connected()). */
    spice_return_if_fail(char_dev != NULL);

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    spice_return_if_fail(buf != NULL);
    buf->buf_used = total_size;

    GpuMsg *msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_DROP_STREAM;
    sprintf((char *)msg->data, "drop stream");

    red_char_device_write_buffer_add(char_dev, buf);
}

static void gpu_client_sys_ack(int mon_id, uint32_t nums)
{
    int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev;
    RedCharDeviceWriteBuffer *buf;
    int *pid;
    uint32_t *pnums;

    char_dev = g_gpu_ext->m_charDev;

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    buf->buf_used = total_size;

    GpuMsg *msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_ACK_SYS;
    pid = (int *)msg->data;
    pnums = (uint32_t *)(msg->data + sizeof(int));
    *pid = mon_id;
    *pnums = nums;

    red_char_device_write_buffer_add(char_dev, buf);
}

static void gpu_client_performance_test(uint8_t *buffer, 
    int compress_size, float compress_cost, float uncompress_cost)
{
    PerfReqInfo *perfReqInfo = (PerfReqInfo *)buffer;

    int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev;
    RedCharDeviceWriteBuffer *buf;
    PerfInfo *pi;

    char_dev = g_gpu_ext->m_charDev;

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    buf->buf_used = total_size;

    GpuMsg *msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_ACK_PERF;
    pi = (PerfInfo *)msg->data;
    pi->number = perfReqInfo->number;
    pi->raw_size = perfReqInfo->raw_size;
    pi->timestamp = perfReqInfo->timestamp;
    pi->compress_cost = compress_cost;
    pi->compress_size = compress_size;
    pi->uncompress_cost = uncompress_cost;

    red_char_device_write_buffer_add(char_dev, buf);
}

static void gpu_client_stream_ready(int mon_id)
{
    int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev;
    RedCharDeviceWriteBuffer *buf;
    int *monitor;

    char_dev = g_gpu_ext->m_charDev;

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    buf->buf_used = total_size;

    GpuMsg *msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_ACK_STREAM_ENABLE;
    monitor = (int *)msg->data;
    *monitor = mon_id;

    red_char_device_write_buffer_add(char_dev, buf);
}

static void gpu_client_notify_guest_gchannel_state(int mon_id, int connected)
{
    int total_size = GPU_PORT_BUFFER;
    RedCharDevice *char_dev;
    RedCharDeviceWriteBuffer *buf;

    char_dev = g_gpu_ext->m_charDev;

    buf = red_char_device_write_buffer_get_server_no_token(char_dev, total_size);
    buf->buf_used = total_size;

    GpuMsg *msg = (GpuMsg *)buf->buf;
    msg->size = total_size - sizeof(int);
    msg->cmd = GPU_COMMAND_ACK_GCHANNEL_STATE;
    NofityGuestGchanelState *state = (NofityGuestGchanelState *)msg->data;
    state->monitor_id = mon_id;
    state->state = connected;

    red_char_device_write_buffer_add(char_dev, buf);
}

/*
* GPUNVIDIA CLASS DEFINE
*/
/* Standard GObject type boilerplate for the GpuNvidia character device. */
#define TYPE_GPUNVIDIA_DEVICE gpunvidia_device_get_type()

#define GPUNVIDIA_DEVICE(obj) \
    (G_TYPE_CHECK_INSTANCE_CAST((obj),TYPE_GPUNVIDIA_DEVICE, GpuNvidiaDevice))
#define GPUNVIDIA_DEVICE_CLASS(klass) \
    (G_TYPE_CHECK_CLASS_CAST((klass), TYPE_GPUNVIDIA_DEVICE, GpuNvidiaDeviceClass))
#define GPUNVIDIA_DEVICE_GET_CLASS(obj) \
    (G_TYPE_INSTANCE_GET_CLASS((obj), TYPE_GPUNVIDIA_DEVICE, GpuNvidiaDeviceClass))

typedef struct GpuNvidiaDevice GpuNvidiaDevice;
typedef struct GpuNvidiaDeviceClass GpuNvidiaDeviceClass;

/* Device instance: owns one display channel and one cursor channel per
 * guest monitor (entries are NULL until allocated). */
struct GpuNvidiaDevice {
    RedCharDevice parent;
    GpuNvidiaChannel *gpunvidia_channel[GPU_MAX_MONITORS];
    CursorChannel *cursor_channel[GPU_MAX_MONITORS];
};

struct GpuNvidiaDeviceClass {
    RedCharDeviceClass parent_class;
};

static GType gpunvidia_device_get_type(void) G_GNUC_CONST;
static GpuNvidiaDevice *gpunvidia_device_new(SpiceCharDeviceInstance *sin, RedsState *reds);
static void gpunvidia_allocate_channels(GpuNvidiaDevice *dev, int monitor_id);

/* Also defines gpunvidia_device_parent_class and the _init hooks' glue. */
G_DEFINE_TYPE(GpuNvidiaDevice, gpunvidia_device, RED_TYPE_CHAR_DEVICE)


/* Create the GpuNvidia char device for @sin, register it with the global
 * GPU extension state, and signal the interface that the port is open.
 *
 * Returns the new RedCharDevice, or NULL if creation failed.
 */
RedCharDevice *
gpunvidia_device_connect(RedsState *reds, SpiceCharDeviceInstance *sin)
{
    SpiceCharDeviceInterface *sif;

    GpuNvidiaDevice *dev = gpunvidia_device_new(sin, reds);

    if (dev == NULL) {
        spice_warning("create gpu device faild.");
        return NULL;
    }

    gpunvidia_device_set_dispose(FALSE);
    sif = spice_char_device_get_interface(sin);
    if (sif->state) {
        /* Tell the guest side the port is now connected. */
        sif->state(sin, 1);
    }

    /* (removed unused local `gpu_monitor_ext *mon_ext`) */
    if (g_gpu_ext) {
        g_gpu_ext->m_charDev = (void *)RED_CHAR_DEVICE(dev);
    }

    // check default ack
    reds_sync_reset_all();

    return RED_CHAR_DEVICE(dev);
}

/* For each monitor: reset channels that are still connected, and destroy
 * local channel objects whose backing extension channel has gone away.
 * Finally wake all QXL workers so they pick up the new state. */
static void
gpunvidia_mon_ext_reset_channels(GpuNvidiaDevice *dev)
{
    int i;

    for (i = 0; i < GPU_MAX_MONITORS; i++) {
        void *ext_channel = g_gpu_ext->g_mon_ext[i]->m_gpunvidia_channel;

        if (ext_channel != NULL
            && red_channel_is_connected(RED_CHANNEL(dev->gpunvidia_channel[i]))) {
            spice_printerr("### %s:%d reset channel", __FUNCTION__, __LINE__);
            gpunvidia_channel_reset(dev->gpunvidia_channel[i]);
        }

        if (ext_channel == NULL && dev->gpunvidia_channel[i] != NULL) {
            spice_printerr("### %s:%d destroy channel", __FUNCTION__, __LINE__);
            red_channel_destroy(RED_CHANNEL(dev->gpunvidia_channel[i]));
            dev->gpunvidia_channel[i] = NULL;
            red_channel_destroy(RED_CHANNEL(dev->cursor_channel[i]));
            dev->cursor_channel[i] = NULL;
        }
    }

    gpu_qxl_wakeup_all();
}

/* GObject dispose: tear down all streaming state so a later reconnect
 * starts from scratch, then chain up to release parent resources. */
static void
gpunvidia_device_dispose(GObject *object)
{
    GpuNvidiaDevice *dev = GPUNVIDIA_DEVICE(object);

    // TODO, memory leak
    g_gpu_ext->g_trigger = 0;
    g_gpu_ext->m_prep_stream_state = GPU_PREP_STREAM_INIT;
    g_gpu_ext->m_is_gpu_stream_mode = 0;
    g_gpu_ext->m_overstock = 0;
    gpu_mon_reset();
#ifdef GPU_RINGBUFFER
    ringbuffer_reset(g_gpunvidia_data_ring.rb);
#else
    gpu_reader_reset();
#endif

    gpunvidia_mon_ext_reset_channels(dev);
    gpunvidia_device_set_dispose(TRUE);
    gpu_ext_statistic_reset_all();

    /* Chain up so RedCharDevice/GObject resources are released — a
     * dispose implementation must always call its parent's dispose
     * (was missing). */
    G_OBJECT_CLASS(gpunvidia_device_parent_class)->dispose(object);
}

/* Drop our reference to the device attached to @sin; dispose runs if
 * this was the last reference. */
void
gpunvidia_device_disconnect(RedsState *reds, SpiceCharDeviceInstance *sin)
{
    if (sin->st == NULL) {
        return;
    }

    g_object_unref(GPUNVIDIA_DEVICE(sin->st));
    sin->st = NULL;
}

/* RedCharDeviceClass::read_one_msg_from_device implementation.
 *
 * Drains available bytes from the guest character device and dispatches
 * complete messages.  Always returns NULL: messages are consumed here
 * rather than queued as pipe items for the generic char-device flow.
 */
static RedPipeItem *
gpunvidia_device_read_msg_from_dev(RedCharDevice *self, SpiceCharDeviceInstance *sin)
{
#ifdef GPU_RINGBUFFER
    SpiceCharDeviceInterface *sif;
    int n;
    size_t wbytes;

    /* Scratch buffer: one maximum-size device read (64 KiB + 32). */
    uint8_t buf[64*1024+32]={0};

    sif = spice_char_device_get_interface(sin);
    n = sif->read(sin, (uint8_t *)buf,sizeof(buf));
    if(n>0){
        wbytes = ringbuffer_write_space(g_gpunvidia_data_ring.rb);
        if(n > wbytes){
            /* Data is dropped when the ring is full — the message stream
             * will desynchronize here.  NOTE(review): confirm this is an
             * acceptable failure mode. */
            spice_warning("gpu ring write space too small. data:%d, space:%lu", n, wbytes);
            return NULL;
        }

        ringbuffer_write(g_gpunvidia_data_ring.rb, (char *)buf, n);

        /* Consume as many complete messages as the ring now holds. */
        while(gpu_handle_data()){
        }
    }
    else{
        spice_warning("read gpu faild. n:%d", n);
    }
    return NULL;
#else
    SpiceCharDeviceInterface *sif;
    int n;
    bool handled = false;
    GpuReader *dev = g_reader;

    gpu_ext_handle_ack(NULL, NULL);

    /* A previously received message is still mid-dispatch (handlled==1):
     * retry dispatching it before reading anything new. */
    if(dev->handlled == 1u){
        gpu_ext_handle_diff(self, dev->hdr_data, dev->hdr.size, &dev->handlled);
        if(dev->handlled == 0u){
            /* Dispatch finished — reset so the next call parses a header. */
            dev->hdr_pos = 0u;
            dev->hdr.size = 0u;
        }
        return NULL;
    }

    sif = spice_char_device_get_interface(sin);

    /* read header */
    while (dev->hdr_pos < sizeof(dev->hdr)) {
        n = sif->read(sin, (uint8_t *) &dev->hdr + dev->hdr_pos, sizeof(dev->hdr) - dev->hdr_pos);
        if (n <= 0) {
            /* Partial header: hdr_pos persists; resume on next call. */
            return NULL;
        }
        dev->hdr_pos += n;
    }

    /* read data */
    handled = gpu_handle_msg_data(sin);

    if (handled) {
        /* handle data */
        dev->handlled = 1u;
        gpu_ext_handle_diff(self, dev->hdr_data, dev->hdr.size, &dev->handlled);
        if(dev->handlled == 0u){
            dev->hdr_pos = 0u;
            dev->hdr.size = 0u;
        }
    }

    return NULL;
#endif
}

/* RedCharDeviceClass::send_msg_to_client — not expected to be reached;
 * logs a warning if the generic char-device path ever invokes it. */
static void
gpunvidia_device_send_msg_to_client(RedCharDevice *self, RedPipeItem *msg, RedClient *client)
{
    spice_warning("gpunvidia_device_send_msg_to_client");
}

/* RedCharDeviceClass::send_tokens_to_client — unused stub; token flow
 * control is disabled for this device (see gpunvidia_device_new()). */
static void
gpunvidia_device_send_tokens_to_client(RedCharDevice *self, RedClient *client, uint32_t tokens)
{
    spice_printerr("Not implemented!");
}

/* Propagate a guest resolution change to the display channel of the
 * affected monitor; the stream codec is fixed to H.264. */
static void 
gpunvidia_device_change_format(RedCharDevice *self, const GchannelFormat *format)
{
    spice_printerr("### %s:%d monitor %d changed format %dx%d",
        __FUNCTION__, __LINE__, format->monitor_id, format->width, format->height);
    GpuNvidiaDevice *dev = GPUNVIDIA_DEVICE(self);

    /* monitor_id comes from the guest; keep it inside the fixed-size
     * channel array before indexing. */
    g_return_if_fail(format->monitor_id >= 0 && format->monitor_id < GPU_MAX_MONITORS);

    StreamMsgFormat fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.width = format->width;
    fmt.height = format->height;
    fmt.codec = SPICE_VIDEO_CODEC_TYPE_H264;
    gpunvidia_channel_change_format(dev->gpunvidia_channel[format->monitor_id], &fmt);
}

/* Callback from the display channel: a client started (start == 1) or
 * stopped (start == 0) consuming the stream for @channel's monitor. */
static void
gpunvidia_device_stream_start(void *opaque G_GNUC_UNUSED, int start,
                           GpuNvidiaChannel *channel)
{
    switch (start) {
    case 1: {
        int mon_id = gpunvidia_channel_get_monitor_id(channel);

        spice_printerr("monitor %d channel connected", mon_id);
        gpu_client_notify_guest_gchannel_state(mon_id, 1);
        break;
    }
    case 0:
        gpu_client_connected(0);
        break;
    default:
        break;
    }
}

/* RedCharDeviceClass::remove_client — nothing to clean up per client for
 * this device; logs so unexpected invocations are visible. */
static void
gpunvidia_device_remove_client(RedCharDevice *self, RedClient *client)
{
    spice_warning("gpunvidia_device_remove_client");
}

/* Create (or re-attach) the display + cursor channel pair for @monitor_id.
 *
 * If the global extension state already owns channels for this monitor
 * they are reused and the stream-start notification is replayed;
 * otherwise new display and cursor channels are created, recorded in
 * both the device and the extension state, and the start callback is
 * registered.
 */
static void
gpunvidia_allocate_channels(GpuNvidiaDevice *dev, int monitor_id)
{
    spice_warning("### %s:%d ", __FUNCTION__, __LINE__);

    /* monitor_id indexes fixed-size channel arrays — reject out-of-range
     * ids before any access. */
    g_return_if_fail(monitor_id >= 0 && monitor_id < GPU_MAX_MONITORS);

    if (dev->gpunvidia_channel[monitor_id]) {
        return;
    }

    if (g_gpu_ext->g_mon_ext[monitor_id]->m_gpunvidia_channel) {
        spice_printerr("### %s:%d monitor %d channel has exsit", __FUNCTION__, __LINE__, monitor_id);
        dev->gpunvidia_channel[monitor_id] = (GpuNvidiaChannel *)(g_gpu_ext->g_mon_ext[monitor_id]->m_gpunvidia_channel);
        dev->cursor_channel[monitor_id] = (CursorChannel *)(g_gpu_ext->g_mon_ext[monitor_id]->m_gpunvidia_cursor_channel);
        /* Replay the "stream started" notification for the reused channel. */
        gpunvidia_device_stream_start(dev, 1, dev->gpunvidia_channel[monitor_id]);
        return;
    } else {
        spice_printerr("### %s:%d monitor %d create new channel", __FUNCTION__, __LINE__, monitor_id);
    }

    SpiceServer *reds = red_char_device_get_server(RED_CHAR_DEVICE(dev));
    SpiceCoreInterfaceInternal *core = reds_get_core_interface(reds);

    /* Display and cursor channels share one channel id. */
    int id = reds_get_free_channel_id(reds, SPICE_CHANNEL_DISPLAY);
    g_return_if_fail(id >= 0);

    GpuNvidiaChannel *gpunvidia_channel = gpunvidia_channel_new(reds, id);
    gpunvidia_channel_set_monitor_id(gpunvidia_channel, monitor_id);
    CursorChannel *cursor_channel = cursor_channel_new(reds, id, core);
    cursor_channel_set_stream_cursor(cursor_channel, true);
    g_gpu_ext->g_mon_ext[monitor_id]->m_gpunvidia_channel = gpunvidia_channel;
    g_gpu_ext->g_mon_ext[monitor_id]->m_gpunvidia_cursor_channel = cursor_channel;

    dev->gpunvidia_channel[monitor_id] = gpunvidia_channel;
    dev->cursor_channel[monitor_id] = cursor_channel;

    gpunvidia_channel_register_start_cb(gpunvidia_channel, gpunvidia_device_stream_start, dev);
}

/* RedCharDeviceClass::port_event — port open/close events are not acted
 * upon; logged for diagnostics only. */
static void
gpunvidia_device_port_event(RedCharDevice *char_dev, uint8_t event)
{
    spice_warning("gpunvidia_device_port_event");
}

/* One-time class setup: initializes the global GPU receive machinery and
 * wires the RedCharDevice virtual methods.
 * NOTE(review): global state (ring/reader, g_gpu_ext) is initialized in
 * class_init, i.e. once on first instantiation — confirm this is meant
 * to be class-wide rather than per-instance. */
static void
gpunvidia_device_class_init(GpuNvidiaDeviceClass *klass)
{
    GObjectClass *object_class = G_OBJECT_CLASS(klass);
    RedCharDeviceClass *char_dev_class = RED_CHAR_DEVICE_CLASS(klass);

#ifdef GPU_RINGBUFFER
    gpu_data_ring(&g_gpunvidia_data_ring);
#else
    gpu_reader_init();
#endif
    gpu_ext_init(&g_gpu_ext);

    object_class->dispose = gpunvidia_device_dispose;

    char_dev_class->read_one_msg_from_device = gpunvidia_device_read_msg_from_dev;
    char_dev_class->send_msg_to_client = gpunvidia_device_send_msg_to_client;
    char_dev_class->send_tokens_to_client = gpunvidia_device_send_tokens_to_client;
    char_dev_class->remove_client = gpunvidia_device_remove_client;
    char_dev_class->port_event = gpunvidia_device_port_event;
}

/* Per-instance init: nothing to do — the channel arrays are already
 * zeroed by GObject allocation and are filled lazily in
 * gpunvidia_allocate_channels(). */
static void
gpunvidia_device_init(GpuNvidiaDevice *self)
{

}

/* Allocate a GpuNvidiaDevice bound to @sin.  Token-based flow control is
 * effectively disabled: zero client-token interval, unlimited self
 * tokens. */
static GpuNvidiaDevice *
gpunvidia_device_new(SpiceCharDeviceInstance *sin, RedsState *reds)
{
    return g_object_new(TYPE_GPUNVIDIA_DEVICE,
                        "sin", sin,
                        "spice-server", reds,
                        "client-tokens-interval", 0ULL,
                        "self-tokens", ~0ULL,
                        NULL);
}

/* Forward one encoded frame (@data/@size, timestamp @mm_time) to the
 * display channel of @monitor_id. */
static void
gpunvidia_device_stream_data(RedCharDevice *self, const void *data, 
    size_t size, uint32_t mm_time, int monitor_id)
{
    GpuNvidiaDevice *dev = GPUNVIDIA_DEVICE(self);

    /* monitor_id indexes a fixed-size channel array — bounds-check it. */
    g_return_if_fail(monitor_id >= 0 && monitor_id < GPU_MAX_MONITORS);

    gpunvidia_channel_send_data(dev->gpunvidia_channel[monitor_id], data, size, mm_time);
}

/* RedCharDevice hook: make sure the display/cursor channel pair for
 * @monitor_id exists. */
static void
gpunvidia_device_create_channel(RedCharDevice *self, int monitor_id)
{
    gpunvidia_allocate_channels(GPUNVIDIA_DEVICE(self), monitor_id);
}

/* RedCharDevice hook: hand a guest cursor command to the cursor channel
 * of @monitor_id. */
static void
gpunvidia_device_cursor_cmd(RedCharDevice *self, RedCursorCmd *cmd, int monitor_id)
{
    GpuNvidiaDevice *device = GPUNVIDIA_DEVICE(self);

    cursor_channel_process_cmd(device->cursor_channel[monitor_id], cmd);
}

